summary | refs | log | tree | commit | diff
path: root/src/buildtool/execution_api/remote/bazel
diff options
context:
space:
mode:
author: Oliver Reiche <oliver.reiche@gmail.com> 2023-04-15 16:28:33 +0200
committer: Oliver Reiche <oliver.reiche@huawei.com> 2023-04-26 18:29:44 +0200
commit: d762bfa1953933dfac0a29a74523c25719396b8c (patch)
tree: 1002b0aecc1af698f0349a4efb4bfc169886c60a /src/buildtool/execution_api/remote/bazel
parent: 03e1019aca5d08e53bfeb455071d91561fc33039 (diff)
download: justbuild-d762bfa1953933dfac0a29a74523c25719396b8c.tar.gz
imports: Switch to Microsoft GSL implementation
... with two minor code base changes compared to previous use of gsl-lite:

- dag.hpp: ActionNode::Ptr and ArtifactNode::Ptr are not wrapped in
  gsl::not_null<> anymore, due to lack of support for wrapping
  std::unique_ptr<>. More specifically, the move constructor is missing,
  rendering it impossible to use std::vector<>::emplace_back().

- utils/cpp/gsl.hpp: New header file added to implement the macros
  ExpectsAudit() and EnsureAudit(), asserts running only in debug builds,
  which were available in gsl-lite but are missing in MS GSL.
Diffstat (limited to 'src/buildtool/execution_api/remote/bazel')
-rw-r--r-- src/buildtool/execution_api/remote/bazel/bazel_ac_client.cpp        |   2
-rw-r--r-- src/buildtool/execution_api/remote/bazel/bazel_api.hpp              |   2
-rw-r--r-- src/buildtool/execution_api/remote/bazel/bazel_cas_client.cpp       |   2
-rw-r--r-- src/buildtool/execution_api/remote/bazel/bazel_execution_client.cpp |   2
-rw-r--r-- src/buildtool/execution_api/remote/bazel/bazel_network.cpp          | 121
-rw-r--r-- src/buildtool/execution_api/remote/bazel/bazel_network.hpp          |   4
-rw-r--r-- src/buildtool/execution_api/remote/bazel/bazel_response.cpp         |   5
-rw-r--r-- src/buildtool/execution_api/remote/bazel/bytestream_client.hpp      |  93
8 files changed, 123 insertions(+), 108 deletions(-)
diff --git a/src/buildtool/execution_api/remote/bazel/bazel_ac_client.cpp b/src/buildtool/execution_api/remote/bazel/bazel_ac_client.cpp
index 74542453..85759ec1 100644
--- a/src/buildtool/execution_api/remote/bazel/bazel_ac_client.cpp
+++ b/src/buildtool/execution_api/remote/bazel/bazel_ac_client.cpp
@@ -14,7 +14,7 @@
#include "src/buildtool/execution_api/remote/bazel/bazel_ac_client.hpp"
-#include "gsl-lite/gsl-lite.hpp"
+#include "gsl/gsl"
#include "src/buildtool/common/bazel_types.hpp"
#include "src/buildtool/execution_api/remote/bazel/bazel_client_common.hpp"
diff --git a/src/buildtool/execution_api/remote/bazel/bazel_api.hpp b/src/buildtool/execution_api/remote/bazel/bazel_api.hpp
index 111a09cf..6edfae5f 100644
--- a/src/buildtool/execution_api/remote/bazel/bazel_api.hpp
+++ b/src/buildtool/execution_api/remote/bazel/bazel_api.hpp
@@ -20,7 +20,7 @@
#include <utility>
#include <vector>
-#include "gsl-lite/gsl-lite.hpp"
+#include "gsl/gsl"
#include "src/buildtool/common/artifact_digest.hpp"
#include "src/buildtool/execution_api/bazel_msg/bazel_common.hpp"
#include "src/buildtool/execution_api/bazel_msg/blob_tree.hpp"
diff --git a/src/buildtool/execution_api/remote/bazel/bazel_cas_client.cpp b/src/buildtool/execution_api/remote/bazel/bazel_cas_client.cpp
index 6dd2e9e3..0c59c83d 100644
--- a/src/buildtool/execution_api/remote/bazel/bazel_cas_client.cpp
+++ b/src/buildtool/execution_api/remote/bazel/bazel_cas_client.cpp
@@ -15,7 +15,7 @@
#include "src/buildtool/execution_api/remote/bazel/bazel_cas_client.hpp"
#include "grpcpp/grpcpp.h"
-#include "gsl-lite/gsl-lite.hpp"
+#include "gsl/gsl"
#include "src/buildtool/common/bazel_types.hpp"
#include "src/buildtool/execution_api/common/execution_common.hpp"
#include "src/buildtool/execution_api/remote/bazel/bazel_client_common.hpp"
diff --git a/src/buildtool/execution_api/remote/bazel/bazel_execution_client.cpp b/src/buildtool/execution_api/remote/bazel/bazel_execution_client.cpp
index b1244e0c..365998a7 100644
--- a/src/buildtool/execution_api/remote/bazel/bazel_execution_client.cpp
+++ b/src/buildtool/execution_api/remote/bazel/bazel_execution_client.cpp
@@ -15,7 +15,7 @@
#include "src/buildtool/execution_api/remote/bazel/bazel_execution_client.hpp"
#include "grpcpp/grpcpp.h"
-#include "gsl-lite/gsl-lite.hpp"
+#include "gsl/gsl"
#include "src/buildtool/execution_api/remote/bazel/bazel_client_common.hpp"
namespace bazel_re = build::bazel::remote::execution::v2;
diff --git a/src/buildtool/execution_api/remote/bazel/bazel_network.cpp b/src/buildtool/execution_api/remote/bazel/bazel_network.cpp
index c92164ba..159a848e 100644
--- a/src/buildtool/execution_api/remote/bazel/bazel_network.cpp
+++ b/src/buildtool/execution_api/remote/bazel/bazel_network.cpp
@@ -120,50 +120,53 @@ auto BazelNetwork::IsAvailable(std::vector<bazel_re::Digest> const& digests)
template <class T_Iter>
auto BazelNetwork::DoUploadBlobs(T_Iter const& first,
T_Iter const& last) noexcept -> bool {
- auto num_blobs = gsl::narrow<std::size_t>(std::distance(first, last));
-
- std::vector<bazel_re::Digest> digests{};
- digests.reserve(num_blobs);
-
- auto begin = first;
- auto current = first;
- std::size_t transfer_size{};
- while (current != last) {
- auto const& blob = *current;
- transfer_size += blob.data.size();
- if (transfer_size > kMaxBatchTransferSize) {
- if (begin == current) {
- if (cas_->UpdateSingleBlob(instance_name_, blob)) {
- digests.emplace_back(blob.digest);
+ try {
+ auto num_blobs = gsl::narrow<std::size_t>(std::distance(first, last));
+
+ std::vector<bazel_re::Digest> digests{};
+ digests.reserve(num_blobs);
+
+ auto begin = first;
+ auto current = first;
+ std::size_t transfer_size{};
+ while (current != last) {
+ auto const& blob = *current;
+ transfer_size += blob.data.size();
+ if (transfer_size > kMaxBatchTransferSize) {
+ if (begin == current) {
+ if (cas_->UpdateSingleBlob(instance_name_, blob)) {
+ digests.emplace_back(blob.digest);
+ }
+ ++current;
}
- ++current;
+ else {
+ for (auto& digest : cas_->BatchUpdateBlobs(
+ instance_name_, begin, current)) {
+ digests.emplace_back(std::move(digest));
+ }
+ }
+ begin = current;
+ transfer_size = 0;
}
else {
- for (auto& digest :
- cas_->BatchUpdateBlobs(instance_name_, begin, current)) {
- digests.emplace_back(std::move(digest));
- }
+ ++current;
}
- begin = current;
- transfer_size = 0;
- }
- else {
- ++current;
}
- }
- if (begin != current) {
- for (auto& digest :
- cas_->BatchUpdateBlobs(instance_name_, begin, current)) {
- digests.emplace_back(std::move(digest));
+ if (begin != current) {
+ for (auto& digest :
+ cas_->BatchUpdateBlobs(instance_name_, begin, current)) {
+ digests.emplace_back(std::move(digest));
+ }
}
- }
- if (digests.size() != num_blobs) {
- Logger::Log(LogLevel::Warning, "Failed to update all blobs");
- return false;
+ if (digests.size() == num_blobs) {
+ return true;
+ }
+ } catch (...) {
}
- return true;
+ Logger::Log(LogLevel::Warning, "Failed to update all blobs");
+ return false;
}
auto BazelNetwork::UploadBlobs(BlobContainer const& blobs,
@@ -206,31 +209,37 @@ auto BazelNetwork::BlobReader::Next() noexcept -> std::vector<BazelBlob> {
std::size_t size{};
std::vector<BazelBlob> blobs{};
- while (current_ != ids_.end()) {
- auto blob_size = gsl::narrow<std::size_t>(current_->size_bytes());
- size += blob_size;
- // read if size is 0 (unknown) or exceeds transfer size
- if (blob_size == 0 or size > kMaxBatchTransferSize) {
- // perform read of range [begin_, current_)
- if (begin_ == current_) {
- auto blob = cas_->ReadSingleBlob(instance_name_, *begin_);
- if (blob) {
- blobs.emplace_back(std::move(*blob));
+ try {
+ while (current_ != ids_.end()) {
+ auto blob_size = gsl::narrow<std::size_t>(current_->size_bytes());
+ size += blob_size;
+ // read if size is 0 (unknown) or exceeds transfer size
+ if (blob_size == 0 or size > kMaxBatchTransferSize) {
+ // perform read of range [begin_, current_)
+ if (begin_ == current_) {
+ auto blob = cas_->ReadSingleBlob(instance_name_, *begin_);
+ if (blob) {
+ blobs.emplace_back(std::move(*blob));
+ }
+ ++current_;
}
- ++current_;
- }
- else {
- blobs = cas_->BatchReadBlobs(instance_name_, begin_, current_);
+ else {
+ blobs =
+ cas_->BatchReadBlobs(instance_name_, begin_, current_);
+ }
+ begin_ = current_;
+ break;
}
- begin_ = current_;
- break;
+ ++current_;
}
- ++current_;
- }
- if (begin_ != current_) {
- blobs = cas_->BatchReadBlobs(instance_name_, begin_, current_);
- begin_ = current_;
+ if (begin_ != current_) {
+ blobs = cas_->BatchReadBlobs(instance_name_, begin_, current_);
+ begin_ = current_;
+ }
+ } catch (std::exception const& e) {
+ Logger::Log(LogLevel::Error, "Reading blobs failed with: {}", e.what());
+ Ensures(false);
}
return blobs;
diff --git a/src/buildtool/execution_api/remote/bazel/bazel_network.hpp b/src/buildtool/execution_api/remote/bazel/bazel_network.hpp
index e2c1a065..c1ae34ad 100644
--- a/src/buildtool/execution_api/remote/bazel/bazel_network.hpp
+++ b/src/buildtool/execution_api/remote/bazel/bazel_network.hpp
@@ -46,10 +46,10 @@ class BazelNetwork {
std::vector<bazel_re::Digest>::const_iterator current_;
BlobReader(std::string instance_name,
- gsl::not_null<BazelCasClient*> cas,
+ gsl::not_null<BazelCasClient*> const& cas,
std::vector<bazel_re::Digest> ids)
: instance_name_{std::move(instance_name)},
- cas_{std::move(cas)},
+ cas_{cas},
ids_{std::move(ids)},
begin_{ids_.begin()},
current_{begin_} {};
diff --git a/src/buildtool/execution_api/remote/bazel/bazel_response.cpp b/src/buildtool/execution_api/remote/bazel/bazel_response.cpp
index ddebd648..d45d9aa1 100644
--- a/src/buildtool/execution_api/remote/bazel/bazel_response.cpp
+++ b/src/buildtool/execution_api/remote/bazel/bazel_response.cpp
@@ -14,10 +14,11 @@
#include "src/buildtool/execution_api/remote/bazel/bazel_response.hpp"
-#include "gsl-lite/gsl-lite.hpp"
+#include "gsl/gsl"
#include "src/buildtool/compatibility/native_support.hpp"
#include "src/buildtool/execution_api/remote/bazel/bazel_cas_client.hpp"
#include "src/buildtool/logging/logger.hpp"
+#include "src/utils/cpp/gsl.hpp"
namespace {
@@ -63,7 +64,7 @@ auto BazelResponse::Artifacts() const noexcept -> ArtifactInfos {
if (not Compatibility::IsCompatible()) {
// in native mode: just collect and store tree digests
for (auto const& tree : action_result.output_directories()) {
- gsl_ExpectsAudit(NativeSupport::IsTree(tree.tree_digest().hash()));
+ ExpectsAudit(NativeSupport::IsTree(tree.tree_digest().hash()));
try {
artifacts.emplace(
tree.path(),
diff --git a/src/buildtool/execution_api/remote/bazel/bytestream_client.hpp b/src/buildtool/execution_api/remote/bazel/bytestream_client.hpp
index 79905b39..b9a55707 100644
--- a/src/buildtool/execution_api/remote/bazel/bytestream_client.hpp
+++ b/src/buildtool/execution_api/remote/bazel/bytestream_client.hpp
@@ -99,56 +99,61 @@ class ByteStreamClient {
[[nodiscard]] auto Write(std::string const& resource_name,
std::string const& data) const noexcept -> bool {
- grpc::ClientContext ctx;
- google::bytestream::WriteResponse response{};
- auto writer = stub_->Write(&ctx, &response);
-
- auto* allocated_data =
- std::make_unique<std::string>(kChunkSize, '\0').release();
- google::bytestream::WriteRequest request{};
- request.set_resource_name(resource_name);
- request.set_allocated_data(allocated_data);
- std::size_t pos{};
- do {
- auto const size = std::min(data.size() - pos, kChunkSize);
- allocated_data->resize(size);
- data.copy(allocated_data->data(), size, pos);
- request.set_write_offset(static_cast<int>(pos));
- request.set_finish_write(pos + size >= data.size());
- if (not writer->Write(request)) {
- // According to the docs, quote:
- // If there is an error or the connection is broken during the
- // `Write()`, the client should check the status of the
- // `Write()` by calling `QueryWriteStatus()` and continue
- // writing from the returned `committed_size`.
- auto const committed_size = QueryWriteStatus(resource_name);
- if (committed_size <= 0) {
- logger_.Emit(LogLevel::Error,
- "broken stream for upload to resource name {}",
- resource_name);
- return false;
+ try {
+ grpc::ClientContext ctx;
+ google::bytestream::WriteResponse response{};
+ auto writer = stub_->Write(&ctx, &response);
+
+ auto* allocated_data =
+ std::make_unique<std::string>(kChunkSize, '\0').release();
+ google::bytestream::WriteRequest request{};
+ request.set_resource_name(resource_name);
+ request.set_allocated_data(allocated_data);
+ std::size_t pos{};
+ do {
+ auto const size = std::min(data.size() - pos, kChunkSize);
+ allocated_data->resize(size);
+ data.copy(allocated_data->data(), size, pos);
+ request.set_write_offset(static_cast<int>(pos));
+ request.set_finish_write(pos + size >= data.size());
+ if (not writer->Write(request)) {
+ // According to the docs, quote:
+ // If there is an error or the connection is broken during
+ // the `Write()`, the client should check the status of the
+ // `Write()` by calling `QueryWriteStatus()` and continue
+ // writing from the returned `committed_size`.
+ auto const committed_size = QueryWriteStatus(resource_name);
+ if (committed_size <= 0) {
+ logger_.Emit(
+ LogLevel::Error,
+ "broken stream for upload to resource name {}",
+ resource_name);
+ return false;
+ }
+ pos = gsl::narrow<std::size_t>(committed_size);
+ }
+ else {
+ pos += kChunkSize;
}
- pos = gsl::narrow<std::size_t>(committed_size);
+ } while (pos < data.size());
+ if (not writer->WritesDone()) {
+ logger_.Emit(LogLevel::Error,
+ "broken stream for upload to resource name {}",
+ resource_name);
+ return false;
}
- else {
- pos += kChunkSize;
+
+ auto status = writer->Finish();
+ if (not status.ok()) {
+ LogStatus(&logger_, LogLevel::Error, status);
+ return false;
}
- } while (pos < data.size());
- if (not writer->WritesDone()) {
- logger_.Emit(LogLevel::Error,
- "broken stream for upload to resource name {}",
- resource_name);
- return false;
- }
- auto status = writer->Finish();
- if (not status.ok()) {
- LogStatus(&logger_, LogLevel::Error, status);
+ return gsl::narrow<std::size_t>(response.committed_size()) ==
+ data.size();
+ } catch (...) {
return false;
}
-
- return gsl::narrow<std::size_t>(response.committed_size()) ==
- data.size();
}
template <class T_Input>