Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 8 additions & 1 deletion dissect/target/helpers/descriptor_extensions.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,14 @@ def _fill_default_fields(self, record_kwargs: dict[str, Any]) -> dict[str, Any]:
user_home = user.home

record_kwargs.update(
{"username": username, "user_id": user_id, "user_group": user_group, "user_home": user_home}
(k, v)
for (k, v) in {
"username": username,
"user_id": user_id,
"user_group": user_group,
"user_home": user_home,
}.items()
if v is not None
)
return record_kwargs

Expand Down
Empty file.
234 changes: 234 additions & 0 deletions dissect/target/plugins/os/windows/bits/_plugin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,234 @@
from __future__ import annotations

import struct
import typing
import uuid

from dissect.database.ese import ESE
from dissect.database.ese import Table as EseTable
from dissect.util.ts import wintimestamp

from dissect.target.exceptions import UnsupportedPluginError
from dissect.target.helpers.descriptor_extensions import UserRecordDescriptorExtension
from dissect.target.helpers.record import create_extended_descriptor
from dissect.target.plugin import Plugin, export
from dissect.target.plugins.os.windows.bits.c_bits import c_bits

if typing.TYPE_CHECKING:
from collections.abc import Iterator
from pathlib import Path

from dissect.target import Target

# Record descriptor for BITS job/file entries. The UserRecordDescriptorExtension
# adds user fields (username, user_id, user_group, user_home) resolved from the
# job owner SID by the plugin.
BitsRecord = create_extended_descriptor([UserRecordDescriptorExtension])(
    "windows/filesystem/bits",
    [
        ("string", "job_type"),
        ("string", "state"),
        ("string", "priority"),
        ("string", "job_id"),
        ("string", "name"),
        ("string", "desc"),
        ("string", "callback_cmd"),  # Command executed when "notify_flag" event occur (mainly download complete/error)
        ("string", "callback_args"),
        ("string", "notify_flag"),
        ("boolean", "has_error"),
        ("datetime", "job_ctime"),  # Job creation time
        ("datetime", "job_mtime"),  # Job modification time (E.g file added)
        ("datetime", "job_mtime_bis"),  # Another job modification time, but different that job mtime.
        # E.g this ts is also modified on transfer completed
        ("datetime", "job_completion_time"),  # Transfer completion time
        ("datetime", "transferred_file_mtime"),  # mtime of file from source for download, or from fs for uploads
        ("string", "file_guid"),  # technical internal information,
        # file guid is mentioned in jobs and allows to map jobs with file
        ("string", "file_dst"),
        ("string", "file_src"),
        ("string", "file_tmp"),
        ("varint", "file_dl_size"),  # size effectively downloaded/uploaded
        ("varint", "file_transfer_size"),  # File size
        ("string", "file_drive"),
        ("string", "file_volume"),
        ("path", "source"),
    ],
)

# Little-endian byte representation of UUID('7756da36-516f-435a-acac-44a248fff34d').
# This marker delimits the start of the file list section inside a job blob;
# the job parser splits the raw blob on this sequence.
FILE_LIST_STORAGE_GUID = b"\x36\xda\x56\x77\x6f\x51\x5a\x43\xac\xac\x44\xa2\x48\xff\xf3\x4d"


class BitsPlugin(Plugin):
    """Windows BITS (Background Intelligent Transfer Service) plugin.

    Only supports the post Windows 10 qmgr.db (ESE) format; earlier Windows
    versions store BITS state in a different format that is not handled here.
    """

    def __init__(self, target: Target):
        super().__init__(target)
        # Materialize the generator to a list: a generator object is always
        # truthy, which would defeat the emptiness check in check_compatible.
        self.qmgr_db_paths = list(self._get_paths())

    def _get_paths(self) -> Iterator[Path]:
        """Yield the path of the qmgr ESE database if it exists on the target."""
        qmgr_db_path = self.target.fs.path("sysvol/ProgramData/Microsoft/Network/Downloader/qmgr.db")
        if qmgr_db_path.exists():
            yield qmgr_db_path

    def check_compatible(self) -> None:
        if not self.qmgr_db_paths:
            raise UnsupportedPluginError("No qmgr ESE database found")

    def build_files_dict(self, file_table: EseTable) -> dict[str, bytes]:
        """Return a mapping of lower-cased file GUID strings to their raw file blobs.

        Jobs reference their files by GUID, so this lookup table is used to
        join a job record with the file entries it transfers.
        """
        return {entry["Id"].lower(): entry["Blob"] for entry in file_table.records()}

    def get_job_error_len(self, blob: bytes) -> int:
        """Return the length in bytes of the structure containing job error information.

        Basic structure pattern::

            struct JobError{
                u32 has_error;
                if (has_error != 0x00) {
                    u32 unk1;
                    s32 unk2;
                    u32 unk3;
                    u32 unk4;
                    u32 unk5;
                    u8 has_persistent_state;
                    if (has_persistent_state != 0x00) {
                        u32 persistent_state_len;
                        u16 persistent_state[flag];
                    }
                }
            };

        Args:
            blob: Raw bytes starting right after the ``has_error`` field.

        Returns:
            Number of bytes to skip to get past the error structure.
        """
        # The has_persistent_state flag sits after the five unknown 32-bit fields.
        if blob[4 * 5 : 4 * 5 + 1] == b"\x00":
            return 21
        return 21 + struct.unpack("<L", blob[4 * 5 + 2 : 4 * 5 + 6])[0] + 2

    @export(record=[BitsRecord])
    def qmgr_ese(self) -> Iterator[BitsRecord]:
        """Return entries found in background intelligent transfer service ESE database. (Windows 10 or later).

        Version pre windows 10 use a different format

        References:
            - https://github.com/fireeye/BitsParser
            - https://github.com/ANSSI-FR/bits_parser
            - https://cloud.google.com/blog/topics/threat-intelligence/attacker-use-of-windows-background-intelligent-transfer-service/
            - /windows/system32/qmgr.dll
        """
        for db_path in self.qmgr_db_paths:
            with db_path.open("rb") as fh:
                db = ESE(fh)
                table = db.table("Jobs")
                # Jobs reference files by their GUID, so we start by building
                # a dict with all files for fast lookup.
                files_dict = self.build_files_dict(db.table("Files"))
                for record in table.records():
                    ctime = None
                    mtime = None
                    mtime_bis = None
                    has_error = False
                    completion_time = None
                    # Reset per job: not every job blob contains a file list,
                    # and a stale value must not leak into the next iteration.
                    file_guid_list = None
                    a = record["Blob"]
                    # These bytes indicate if job is an upload/download/upload reply job.
                    # For some jobs, especially upload jobs, we may have 2 job GUIDs;
                    # we need to skip the first to parse the structure properly.
                    if a[0x1C:0x20] not in [b"\x00\x00\x00\x00", b"\x02\x00\x00\x00", b"\x01\x00\x00\x00"]:
                        a = a[0x10:]
                    entry = c_bits.BitsJobsHeader(a)
                    # The jobs header is followed by a security descriptor section.
                    # Its total length depends on the header guid/version; as parsing
                    # it is prone to errors, we skip it. This is safe because the next
                    # section (the list of files related to this job) has a known
                    # start marker.
                    storage_guid_list = a[len(entry) :].split(FILE_LIST_STORAGE_GUID)
                    if len(storage_guid_list) > 1:
                        file_guid_list = c_bits.BitsJobsFileGuidList(storage_guid_list[1])
                        metadata_section_offset = 4
                        if len(storage_guid_list) > 2:
                            job_has_error = storage_guid_list[2][:4] != b"\x00\x00\x00\x00"
                            if job_has_error:
                                # Skip the error section to find the metadata section.
                                has_error = True
                                metadata_section_offset += self.get_job_error_len(storage_guid_list[2][4:])
                            metadata_section = c_bits.BitsMetadata(storage_guid_list[2][metadata_section_offset:])
                            ctime = wintimestamp(metadata_section.ctime)
                            mtime = wintimestamp(metadata_section.mtime)
                            mtime_bis = wintimestamp(metadata_section.mtime_bis)
                            completion_time = (
                                wintimestamp(metadata_section.completion_time)
                                if metadata_section.completion_time != 0
                                else None
                            )

                    user_sid = entry.sid.strip("\x00")
                    user = None
                    if user_sid and (sid_user_details := self.target.user_details.find(user_sid)):
                        user = sid_user_details.user
                    entry_yielded = False
                    # Guard against jobs without a file list section.
                    files_guid = file_guid_list.files_guid if file_guid_list is not None else []
                    for file_entry in files_guid:
                        file_guid = uuid.UUID(bytes_le=file_entry)
                        if file_blob := files_dict.get(str(file_guid).lower()):
                            f = (
                                c_bits.DownloadBitsFile(file_blob)
                                if entry.type.name == "DOWNLOAD"
                                else c_bits.UploadBitsFile(file_blob)
                            )
                            transferred_file_mtime = wintimestamp(f.file_mtime) if f.file_mtime != 0 else None
                            yield BitsRecord(
                                job_type=entry.type,
                                state=entry.state,
                                priority=entry.priority,
                                job_id=uuid.UUID(bytes_le=entry.job_id),
                                name=entry.name.strip("\x00"),
                                desc=entry.desc.strip("\x00"),
                                callback_cmd=entry.callback_cmd.strip("\x00"),
                                callback_args=entry.callback_args.strip("\x00"),
                                notify_flag=entry.notify_flag,
                                has_error=has_error,
                                job_ctime=ctime,
                                job_mtime=mtime,
                                job_mtime_bis=mtime_bis,
                                job_completion_time=completion_time,
                                file_guid=file_guid,
                                file_drive=f.drive.strip("\x00"),
                                file_dst=f.dst.strip("\x00"),
                                file_src=f.src.strip("\x00"),
                                file_tmp=f.tmp.strip("\x00"),
                                file_volume=f.volume.strip("\x00"),
                                file_dl_size=f.dl_size,
                                # -1 == Unknown file size
                                file_transfer_size=f.transfer_size if int(f.transfer_size) != -1 else None,
                                transferred_file_mtime=transferred_file_mtime,
                                user_id=user_sid,
                                _user=user,
                                _target=self.target,
                                source=db_path,
                            )
                            entry_yielded = True
                    # If a job has no related files (or none were found), yield the data we have.
                    if not entry_yielded:
                        yield BitsRecord(
                            job_type=entry.type,
                            state=entry.state,
                            priority=entry.priority,
                            job_id=uuid.UUID(bytes_le=entry.job_id),
                            name=entry.name.strip("\x00"),
                            desc=entry.desc.strip("\x00"),
                            callback_cmd=entry.callback_cmd.strip("\x00"),
                            callback_args=entry.callback_args.strip("\x00"),
                            notify_flag=entry.notify_flag,
                            has_error=has_error,
                            job_ctime=ctime,
                            job_mtime=mtime,
                            job_mtime_bis=mtime_bis,
                            job_completion_time=completion_time,
                            user_id=user_sid,
                            _user=user,
                            _target=self.target,
                            source=db_path,
                        )
135 changes: 135 additions & 0 deletions dissect/target/plugins/os/windows/bits/c_bits.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
from __future__ import annotations

from dissect.cstruct import cstruct

# cstruct definitions for parsing BITS qmgr.db job and file blobs.
bits_def = """
// https://learn.microsoft.com/en-us/windows/win32/api/bits/ne-bits-bg_job_type
enum BG_JOB_TYPE : uint32 {
    DOWNLOAD = 0x0,
    UPLOAD = 0x1,
    UPLOAD_REPLY = 0x2
};

// https://learn.microsoft.com/en-us/windows/win32/api/bits/ne-bits-bg_job_priority
enum BG_JOB_PRIORITY : uint32 {
    FOREGROUND = 0x0,
    HIGH = 0x1,
    NORMAL = 0x2,
    LOW = 0x3
};

// https://learn.microsoft.com/en-us/windows/win32/api/bits/nf-bits-ibackgroundcopyjob-setnotifyflags
flag BG_NOTIFY : uint32 {
    JOB_TRANSFERRED = 0x1,
    JOB_ERROR = 0x2,
    DISABLE = 0x4,
    JOB_MODIFICATION = 0x8,
    FILE_TRANSFERRED = 0x10,
    FILE_RANGES_TRANSFERRED = 0x20
};
// https://learn.microsoft.com/en-us/windows/win32/api/bits/ne-bits-bg_job_state
enum JobState : uint32 {
    QUEUED = 0X0,
    CONNECTING = 0X1,
    TRANSFERRING = 0X2,
    SUSPENDED = 0X3,
    ERROR = 0X4,
    TRANSIENT_ERROR = 0X05,
    TRANSFERRED = 0X06,
    ACKNOWLEDGED = 0X07,
    CANCELLED = 0X08,
};

struct DownloadBitsFile {
    char guid[16];
    uint32 dst_len;
    WCHAR dst[dst_len];
    uint32 src_len;
    WCHAR src[src_len];
    uint32 tmp_len;
    WCHAR tmp[tmp_len];
    uint64 dl_size;
    int64 transfer_size;
    char pad;
    uint32 drive_len;
    WCHAR drive[drive_len];
    uint32 volume_len;
    WCHAR volume[volume_len];
    uint32 unk1;
    uint32 unk2;
    int64 unk3;
    uint32 unk4;
    int32 unknown_section_count_1;
    char unknown_section_1[unknown_section_count_1];
    char unknown_flag_1;
    uint32 unknown_section_count_2;
    char unknown_section_2[unknown_section_count_2][16];
    uint64 file_mtime;
};

// For upload, source file modification time is located at the beginning of the structure
// We need to read less data that in the Download type
struct UploadBitsFile {
    char guid[16];
    uint64 file_mtime;
    uint32 dst_len;
    WCHAR dst[dst_len];
    uint32 src_len;
    WCHAR src[src_len];
    uint32 tmp_len;
    WCHAR tmp[tmp_len];
    uint64 dl_size;
    int64 transfer_size;
    char pad;
    uint32 drive_len;
    WCHAR drive[drive_len];
    uint32 volume_len;
    WCHAR volume[volume_len];
};

struct BitsJobsHeader {
    // For some jobs (mainly upload), we may have 2 jobs headers/guid
    // this is handled by plugin
    char guid[16]; // Indicate version and type (Upload/Download Job)
    // e.g
    // a1 56 09 e1 43 af c9 42 92 e6 6f 98 56 eb a7 f6 -> DownloadJobGuid 10.3.2
    // d0 57 56 8f 2c 01 3e 4e ad 2c f4 a5 d7 65 6f af -> UploadJobGuid 10.3.2
    // 38 5c 71 03 1f 28 ca 40 98 13 9d e9 1a 5a 84 d1 -> DownloadJobGuid 10.3.1
    // d8 1e d3 68 d5 34 e1 4f 89 23 94 ab cb f4 c1 cf -> UploadJobGuid 10.3.0
    uint32 _pad;
    BG_JOB_PRIORITY priority;
    JobState state;
    BG_JOB_TYPE type;
    char job_id[16]; // Job UUID, le
    uint32 name_len;
    WCHAR name[name_len];
    uint32 desc_len;
    WCHAR desc[desc_len];
    uint32 callback_cmd_len;
    WCHAR callback_cmd[callback_cmd_len];
    uint32 callback_args_len;
    WCHAR callback_args[callback_args_len];
    uint32 sid_len;
    WCHAR sid[sid_len];
    BG_NOTIFY notify_flag;
};
struct BitsJobsFileGuidList{
    uint32 entry_count;
    char files_guid[entry_count][16];
};

struct BitsMetadata {
    uint32 transient_error_count;
    uint32 retry_delay;
    uint32 timeout;
    uint64 ctime; // Job creation time
    uint64 mtime; // modified on file added, but also on others operation
    uint64 mtime_bis; // Modified when a new file is added, but also when transfer is finished
    // mtime and mtime_bis should be considered as evidence of activity
    // Both are modified in CJob::UpdateModificationTime
    uint64 completion_time; // Set in the JoBTransferred Function
    // Reset to zero on new file added
    // Retrieved using bitsadmin /GETCOMPLETIONTIME
}
"""
c_bits = cstruct().load(bits_def)
Loading