diff options
author | Maksim Denisov <denisov.maksim@huawei.com> | 2025-02-19 10:18:59 +0100 |
---|---|---|
committer | Maksim Denisov <denisov.maksim@huawei.com> | 2025-02-19 17:50:30 +0100 |
commit | d7b17c189ddbf8a69a6e2855a8242cb4e8f148f5 (patch) | |
tree | c22e79ddd3542fbb23684257a3df79c07c828d53 /src | |
parent | 90da9d81814d1717e424286b1d17ac6fbaf54a9a (diff) | |
download | justbuild-d7b17c189ddbf8a69a6e2855a8242cb4e8f148f5.tar.gz |
BazelNetwork: Optimize DoUploadBlobs
...by removing an extra std::vector of iterators; instead, elements are erased directly from the std::unordered_set while iterating over it.
Diffstat (limited to 'src')
-rw-r--r-- | src/buildtool/execution_api/remote/bazel/bazel_network.cpp | 21 |
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/src/buildtool/execution_api/remote/bazel/bazel_network.cpp b/src/buildtool/execution_api/remote/bazel/bazel_network.cpp index e2b6f244..a11ff408 100644 --- a/src/buildtool/execution_api/remote/bazel/bazel_network.cpp +++ b/src/buildtool/execution_api/remote/bazel/bazel_network.cpp @@ -87,25 +87,18 @@ auto BazelNetwork::DoUploadBlobs( } try { - // Partition the blobs according to their size. - // The first group collects all the blobs that must use bytestream api - // because of their size: - using IteratorType = decltype(blobs)::iterator; - std::vector<IteratorType> to_stream; - to_stream.reserve(blobs.size()); - for (auto it = blobs.begin(); it != blobs.end(); ++it) { - if (it->data->size() > MessageLimits::kMaxGrpcLength) { - to_stream.push_back(it); + // First upload all blobs that must use bytestream api because of their + // size: + for (auto it = blobs.begin(); it != blobs.end();) { + if (it->data->size() <= MessageLimits::kMaxGrpcLength) { + ++it; + continue; } - } - - for (auto const& it : to_stream) { if (not cas_->UpdateSingleBlob(instance_name_, *it)) { return false; } - blobs.erase(it); + it = blobs.erase(it); } - to_stream.clear(); // After uploading via stream api, only small blobs that may be uploaded // using batch are in the container: |