Commit 913a985

Yannic authored and copybara-github committed
Report digest of failed uploads
This makes the error message more useful: previously there was no way of telling which upload failed or timed out (other than running with `-j 1`).

Closes #12507.

PiperOrigin-RevId: 343977441
1 parent 4f044e0 commit 913a985
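
In caller terms (a rough sketch, not code from this commit; `uploader`, `digest`, and `chunker` are stand-in names), a future returned by the uploader now fails with an exception that names the blob:

ListenableFuture<Void> upload =
    uploader.uploadBlobAsync(digest, chunker, /* forceUpload= */ false);
try {
  upload.get();
} catch (ExecutionException e) {
  // With this change the cause is an IOException whose message reads
  // "Error while uploading artifact with digest '<hash>/<size>'" and which
  // wraps the original StatusRuntimeException, so parallel uploads can be
  // told apart in the error output.
  System.err.println(e.getCause().getMessage());
}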

4 files changed (+51, -35 lines)

src/main/java/com/google/devtools/build/lib/remote/ByteStreamBuildEventArtifactUploader.java

Lines changed: 1 addition & 6 deletions
@@ -19,7 +19,6 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
-import com.google.common.hash.HashCode;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
@@ -194,11 +193,7 @@ private ListenableFuture<List<PathMetadata>> uploadLocalFiles(
       final ListenableFuture<Void> upload;
       Context prevCtx = ctx.attach();
       try {
-        upload =
-            uploader.uploadBlobAsync(
-                HashCode.fromString(path.getDigest().getHash()),
-                chunker,
-                /* forceUpload=*/ false);
+        upload = uploader.uploadBlobAsync(path.getDigest(), chunker, /* forceUpload=*/ false);
       } finally {
         ctx.detach(prevCtx);
       }

src/main/java/com/google/devtools/build/lib/remote/ByteStreamUploader.java

Lines changed: 46 additions & 21 deletions
@@ -19,6 +19,7 @@
 import static java.util.Collections.singletonMap;
 import static java.util.concurrent.TimeUnit.SECONDS;
 
+import build.bazel.remote.execution.v2.Digest;
 import com.google.bytestream.ByteStreamGrpc;
 import com.google.bytestream.ByteStreamGrpc.ByteStreamFutureStub;
 import com.google.bytestream.ByteStreamProto.QueryWriteStatusRequest;
@@ -64,10 +65,10 @@
 /**
  * A client implementing the {@code Write} method of the {@code ByteStream} gRPC service.
  *
- * <p>The uploader supports reference counting to easily be shared between components with
- * different lifecyles. After instantiation the reference count is {@code 1}.
+ * <p>The uploader supports reference counting to easily be shared between components with different
+ * lifecyles. After instantiation the reference count is {@code 1}.
  *
- * See {@link ReferenceCounted} for more information on reference counting.
+ * <p>See {@link ReferenceCounted} for more information on reference counting.
  */
 class ByteStreamUploader extends AbstractReferenceCounted {
 
@@ -81,12 +82,12 @@ class ByteStreamUploader extends AbstractReferenceCounted {
 
   private final Object lock = new Object();
 
-  /** Contains the hash codes of already uploaded blobs. **/
+  /** Contains the hash codes of already uploaded blobs. * */
   @GuardedBy("lock")
   private final Set<HashCode> uploadedBlobs = new HashSet<>();
 
   @GuardedBy("lock")
-  private final Map<HashCode, ListenableFuture<Void>> uploadsInProgress = new HashMap<>();
+  private final Map<Digest, ListenableFuture<Void>> uploadsInProgress = new HashMap<>();
 
   @GuardedBy("lock")
   private boolean isShutdown;
@@ -179,8 +180,8 @@ public void uploadBlobs(Map<HashCode, Chunker> chunkers, boolean forceUpload)
    * Cancels all running uploads. The method returns immediately and does NOT wait for the uploads
    * to be cancelled.
    *
-   * <p>This method should not be called directly, but will be called implicitly when the
-   * reference count reaches {@code 0}.
+   * <p>This method should not be called directly, but will be called implicitly when the reference
+   * count reaches {@code 0}.
    */
   @VisibleForTesting
   void shutdown() {
@@ -199,6 +200,16 @@ void shutdown() {
     }
   }
 
+  /** @deprecated Use {@link #uploadBlobAsync(Digest, Chunker, boolean)} instead. */
+  @Deprecated
+  @VisibleForTesting
+  public ListenableFuture<Void> uploadBlobAsync(
+      HashCode hash, Chunker chunker, boolean forceUpload) {
+    Digest digest =
+        Digest.newBuilder().setHash(hash.toString()).setSizeBytes(chunker.getSize()).build();
+    return uploadBlobAsync(digest, chunker, forceUpload);
+  }
+
   /**
    * Uploads a BLOB asynchronously to the remote {@code ByteStream} service. The call returns
    * immediately and one can listen to the returned future for the success/failure of the upload.
@@ -209,32 +220,32 @@ void shutdown() {
    * <p>Trying to upload the same BLOB multiple times concurrently, results in only one upload being
    * performed. This is transparent to the user of this API.
    *
-   * @param hash the hash of the data to upload.
+   * @param digest the {@link Digest} of the data to upload.
    * @param chunker the data to upload.
    * @param forceUpload if {@code false} the blob is not uploaded if it has previously been
    *     uploaded, if {@code true} the blob is uploaded.
    * @throws IOException when reading of the {@link Chunker}s input source fails
    */
   public ListenableFuture<Void> uploadBlobAsync(
-      HashCode hash, Chunker chunker, boolean forceUpload) {
+      Digest digest, Chunker chunker, boolean forceUpload) {
     synchronized (lock) {
       checkState(!isShutdown, "Must not call uploadBlobs after shutdown.");
 
-      if (!forceUpload && uploadedBlobs.contains(hash)) {
+      if (!forceUpload && uploadedBlobs.contains(HashCode.fromString(digest.getHash()))) {
        return Futures.immediateFuture(null);
       }
 
-      ListenableFuture<Void> inProgress = uploadsInProgress.get(hash);
+      ListenableFuture<Void> inProgress = uploadsInProgress.get(digest);
       if (inProgress != null) {
         return inProgress;
       }
 
       ListenableFuture<Void> uploadResult =
           Futures.transform(
-              startAsyncUpload(hash, chunker),
+              startAsyncUpload(digest, chunker),
               (v) -> {
                 synchronized (lock) {
-                  uploadedBlobs.add(hash);
+                  uploadedBlobs.add(HashCode.fromString(digest.getHash()));
                 }
                 return null;
               },
@@ -244,14 +255,20 @@ public ListenableFuture<Void> uploadBlobAsync(
           Futures.catchingAsync(
               uploadResult,
               StatusRuntimeException.class,
-              (sre) -> Futures.immediateFailedFuture(new IOException(sre)),
+              (sre) ->
+                  Futures.immediateFailedFuture(
+                      new IOException(
+                          String.format(
+                              "Error while uploading artifact with digest '%s/%s'",
+                              digest.getHash(), digest.getSizeBytes()),
+                          sre)),
               MoreExecutors.directExecutor());
 
-      uploadsInProgress.put(hash, uploadResult);
+      uploadsInProgress.put(digest, uploadResult);
       uploadResult.addListener(
           () -> {
             synchronized (lock) {
-              uploadsInProgress.remove(hash);
+              uploadsInProgress.remove(digest);
             }
           },
           MoreExecutors.directExecutor());
@@ -267,25 +284,33 @@ boolean uploadsInProgress() {
     }
   }
 
-  private static String buildUploadResourceName(
-      String instanceName, UUID uuid, HashCode hash, long size) {
-    String resourceName = format("uploads/%s/blobs/%s/%d", uuid, hash, size);
+  private static String buildUploadResourceName(String instanceName, UUID uuid, Digest digest) {
+    String resourceName =
+        format("uploads/%s/blobs/%s/%d", uuid, digest.getHash(), digest.getSizeBytes());
     if (!Strings.isNullOrEmpty(instanceName)) {
       resourceName = instanceName + "/" + resourceName;
     }
     return resourceName;
   }
 
   /** Starts a file upload and returns a future representing the upload. */
-  private ListenableFuture<Void> startAsyncUpload(HashCode hash, Chunker chunker) {
+  private ListenableFuture<Void> startAsyncUpload(Digest digest, Chunker chunker) {
     try {
       chunker.reset();
     } catch (IOException e) {
       return Futures.immediateFailedFuture(e);
     }
 
+    if (chunker.getSize() != digest.getSizeBytes()) {
+      return Futures.immediateFailedFuture(
+          new IllegalStateException(
+              String.format(
+                  "Expected chunker size of %d, got %d",
+                  digest.getSizeBytes(), chunker.getSize())));
+    }
+
     UUID uploadId = UUID.randomUUID();
-    String resourceName = buildUploadResourceName(instanceName, uploadId, hash, chunker.getSize());
+    String resourceName = buildUploadResourceName(instanceName, uploadId, digest);
     AsyncUpload newUpload =
         new AsyncUpload(
             channel, callCredentialsProvider, callTimeoutSecs, retrier, resourceName, chunker);
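
As a usage sketch of the new signature (the hash literal and the `uploader` variable are placeholders, not part of this change): the digest carries both hash and size, the chunker's size must equal `digest.getSizeBytes()` or the returned future fails with the IllegalStateException above, and the deprecated HashCode overload keeps old callers working by deriving a Digest from `chunker.getSize()`.

byte[] blob = "hello".getBytes(StandardCharsets.UTF_8);
Digest digest =
    Digest.newBuilder()
        .setHash("<hash-of-blob>") // placeholder for the blob's actual hash
        .setSizeBytes(blob.length)
        .build();
Chunker chunker = Chunker.builder().setInput(blob).build();

// Concurrent calls for the same digest share one upload; on a gRPC failure the
// future's IOException message names the digest as "<hash>/<size>".
ListenableFuture<Void> upload =
    uploader.uploadBlobAsync(digest, chunker, /* forceUpload= */ false);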

src/main/java/com/google/devtools/build/lib/remote/GrpcCacheClient.java

Lines changed: 2 additions & 5 deletions
@@ -38,7 +38,6 @@
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
 import com.google.common.flogger.GoogleLogger;
-import com.google.common.hash.HashCode;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
@@ -387,16 +386,14 @@ public void onCompleted() {
   @Override
   public ListenableFuture<Void> uploadFile(Digest digest, Path path) {
     return uploader.uploadBlobAsync(
-        HashCode.fromString(digest.getHash()),
+        digest,
         Chunker.builder().setInput(digest.getSizeBytes(), path).build(),
         /* forceUpload= */ true);
   }
 
   @Override
   public ListenableFuture<Void> uploadBlob(Digest digest, ByteString data) {
     return uploader.uploadBlobAsync(
-        HashCode.fromString(digest.getHash()),
-        Chunker.builder().setInput(data.toByteArray()).build(),
-        /* forceUpload= */ true);
+        digest, Chunker.builder().setInput(data.toByteArray()).build(), /* forceUpload= */ true);
   }
 }

src/test/java/com/google/devtools/build/lib/remote/ByteStreamBuildEventArtifactUploaderTest.java

Lines changed: 2 additions & 3 deletions
@@ -333,7 +333,7 @@ public void remoteFileShouldNotBeUploaded_findMissingDigests() throws Exception
     StaticMissingDigestsFinder digestQuerier =
         Mockito.spy(new StaticMissingDigestsFinder(ImmutableSet.of(remoteDigest)));
     ByteStreamUploader uploader = Mockito.mock(ByteStreamUploader.class);
-    when(uploader.uploadBlobAsync(any(), any(), anyBoolean()))
+    when(uploader.uploadBlobAsync(any(Digest.class), any(), anyBoolean()))
         .thenReturn(Futures.immediateFuture(null));
     ByteStreamBuildEventArtifactUploader artifactUploader =
         newArtifactUploader(uploader, digestQuerier);
@@ -349,8 +349,7 @@ public void remoteFileShouldNotBeUploaded_findMissingDigests() throws Exception
 
     // assert
     verify(digestQuerier).findMissingDigests(any());
-    verify(uploader)
-        .uploadBlobAsync(eq(HashCode.fromString(localDigest.getHash())), any(), anyBoolean());
+    verify(uploader).uploadBlobAsync(eq(localDigest), any(), anyBoolean());
     assertThat(pathConverter.apply(remoteFile)).contains(remoteDigest.getHash());
     assertThat(pathConverter.apply(localFile)).contains(localDigest.getHash());
   }
