Skip to content

Commit b6a74be

Browse files
autoantwort and Leander Schulten
authored
ZipReadBinaryProvider: Decompress largest zip archives first (microsoft#1715)
This results in lower total time needed to decompress all zip files. Co-authored-by: Leander Schulten <[email protected]>
1 parent d0d4200 commit b6a74be

File tree

1 file changed

+13
-4
lines changed

1 file changed

+13
-4
lines changed

src/vcpkg/binarycaching.cpp

Lines changed: 13 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -301,20 +301,29 @@ namespace
301301
std::vector<Optional<ZipResource>> zip_paths(actions.size(), nullopt);
302302
acquire_zips(actions, zip_paths);
303303

304-
std::vector<Command> jobs;
304+
std::vector<std::pair<Command, uint64_t>> jobs_with_size;
305305
std::vector<size_t> action_idxs;
306306
for (size_t i = 0; i < actions.size(); ++i)
307307
{
308308
if (!zip_paths[i]) continue;
309309
const auto& pkg_path = actions[i]->package_dir.value_or_exit(VCPKG_LINE_INFO);
310310
clean_prepare_dir(m_fs, pkg_path);
311-
jobs.push_back(m_zip.decompress_zip_archive_cmd(pkg_path, zip_paths[i].get()->path));
311+
jobs_with_size.emplace_back(m_zip.decompress_zip_archive_cmd(pkg_path, zip_paths[i].get()->path),
312+
m_fs.file_size(zip_paths[i].get()->path, VCPKG_LINE_INFO));
312313
action_idxs.push_back(i);
313314
}
315+
std::sort(jobs_with_size.begin(), jobs_with_size.end(), [](const auto& l, const auto& r) {
316+
return l.second > r.second;
317+
});
314318

315-
auto job_results = decompress_in_parallel(jobs);
319+
std::vector<Command> sorted_jobs;
320+
for (auto&& e : jobs_with_size)
321+
{
322+
sorted_jobs.push_back(std::move(e.first));
323+
}
324+
auto job_results = decompress_in_parallel(sorted_jobs);
316325

317-
for (size_t j = 0; j < jobs.size(); ++j)
326+
for (size_t j = 0; j < sorted_jobs.size(); ++j)
318327
{
319328
const auto i = action_idxs[j];
320329
const auto& zip_path = zip_paths[i].value_or_exit(VCPKG_LINE_INFO);

0 commit comments

Comments (0)