Skip to content

Commit 501cda8

Browse files
authored
Update TF compat protos (#6914)
Routine update of TF compat protos, as described in [this message](https://github.com/tensorflow/tensorboard/blob/52530fa0ff253db305a2c83ddb0e5ecee8143467/tensorboard/compat/proto/proto_test.py#L171). This change syncs the protos to the latest nightly, which is the first one right after the branch cut for the new release 2.18.0.
1 parent 52530fa commit 501cda8

8 files changed

+64
-66
lines changed

tensorboard/compat/proto/config.proto

+46-2
Original file line numberDiff line numberDiff line change
@@ -265,6 +265,43 @@ message GPUOptions {
265265
// system memory size for better resource estimation of multi-tenancy(one
266266
// gpu with multiple model) use case.
267267
int32 gpu_system_memory_size_in_mb = 16;
268+
269+
// If true, save information needed for creating a PjRt GPU client for
270+
// creating a client with remote devices.
271+
bool populate_pjrt_gpu_client_creation_info = 17;
272+
273+
// node_id for use when creating a PjRt GPU client with remote devices,
274+
// which enumerates jobs*tasks from a ServerDef.
275+
int32 node_id = 18;
276+
277+
// Whether to merge data transfer streams into the compute stream in the
278+
// same stream group. Stream merging helps reduce the overhead caused by
279+
// stream synchronization, especially when data transfers are frequent. For
280+
// example, setting "merge_host_to_device_stream = true" will make the
281+
// compute stream responsible for both computation and host to device memory
282+
// copy.
283+
message StreamMergeOptions {
284+
// If true, the compute stream will be used for host_to_device copy as
285+
// well. It's no longer necessary to record an event before the copy to
286+
// let the copy stream wait for the compute stream to finish. There is
287+
// also no need to wait for the copy to complete before executing the
288+
// callback function.
289+
bool merge_host_to_device_stream = 1;
290+
291+
// If true, the compute stream will be used for device_to_host copy as
292+
// well. It's no longer necessary to record an event before the copy to
293+
// let the copy stream wait for the compute stream to finish.
294+
bool merge_device_to_host_stream = 2;
295+
296+
// If true, the compute stream will be used for device_to_device copy as
297+
// well. It's no longer necessary to record an event before the copy to
298+
// let the copy stream wait for the compute stream of the sending device
299+
// to finish. There is also no need to wait for the compute stream of the
300+
// receiving device to finish if the copy is within the same device.
301+
bool merge_device_to_device_stream = 3;
302+
}
303+
304+
StreamMergeOptions stream_merge_options = 19;
268305
}
269306

270307
// Everything inside experimental is subject to change and is not subject
@@ -499,6 +536,9 @@ message ConfigProto {
499536
// Options that apply to all GPUs.
500537
GPUOptions gpu_options = 6;
501538

539+
// Options that apply to pluggable devices.
540+
GPUOptions pluggable_device_options = 18;
541+
502542
// Whether soft placement is allowed. If allow_soft_placement is true,
503543
// an op will be placed on CPU if
504544
// 1. there's no GPU implementation for the OP
@@ -676,6 +716,10 @@ message ConfigProto {
676716
// If true, use Pathways with TFRT API for multi host support.
677717
bool enable_multi_host = 27;
678718

719+
// If true, use ifrt as the backend for TFRT. This is only used when
720+
// `use_tfrt` is true.
721+
bool tfrt_use_ifrt = 32;
722+
679723
// Port for the Pathways server. Ignored if enable_multi_host=false.
680724
int32 backend_server_port = 28;
681725

@@ -730,12 +774,12 @@ message ConfigProto {
730774

731775
reserved 25;
732776

733-
// Next: 32
777+
// Next: 33
734778
}
735779

736780
Experimental experimental = 16;
737781

738-
// Next: 18
782+
// Next: 19
739783
}
740784

741785
// Options for a single Run() call.

tensorboard/compat/proto/coordination_config.proto

+4
Original file line numberDiff line numberDiff line change
@@ -67,4 +67,8 @@ message CoordinationServiceConfig {
6767
// not specify any config. This field allows users to explicitly disable
6868
// coordination service under all situations.
6969
bool force_disable = 12;
70+
71+
// Use long polling to get error from coordination service as the error
72+
// propagation mechanism.
73+
bool poll_for_error_from_service_at_startup = 13;
7074
}

tensorboard/compat/proto/meta_graph.proto

+6-62
Original file line numberDiff line numberDiff line change
@@ -257,74 +257,18 @@ message TensorInfo {
257257

258258
// SignatureDef defines the signature of a computation supported by a TensorFlow
259259
// graph.
260-
//
261-
// For example, a model with two loss computations, sharing a single input,
262-
// might have the following signature_def map, in a MetaGraphDef message.
263-
//
264-
// Note that across the two SignatureDefs "loss_A" and "loss_B", the input key,
265-
// output key, and method_name are identical, and will be used by system(s) that
266-
// implement or rely upon this particular loss method. The output tensor names
267-
// differ, demonstrating how different outputs can exist for the same method.
268-
//
269-
// signature_def {
270-
// key: "loss_A"
271-
// value {
272-
// inputs {
273-
// key: "input"
274-
// value {
275-
// name: "input:0"
276-
// dtype: DT_STRING
277-
// tensor_shape: ...
278-
// }
279-
// }
280-
// outputs {
281-
// key: "loss_output"
282-
// value {
283-
// name: "loss_output_A:0"
284-
// dtype: DT_FLOAT
285-
// tensor_shape: ...
286-
// }
287-
// }
288-
// method_name: "some/package/compute_loss"
289-
// }
290-
// ...
291-
// }
292-
// signature_def {
293-
// key: "loss_B"
294-
// value {
295-
// inputs {
296-
// key: "input"
297-
// value {
298-
// name: "input:0"
299-
// dtype: DT_STRING
300-
// tensor_shape: ...
301-
// }
302-
// }
303-
// outputs {
304-
// key: "loss_output"
305-
// value {
306-
// name: "loss_output_B:0"
307-
// dtype: DT_FLOAT
308-
// tensor_shape: ...
309-
// }
310-
// }
311-
// method_name: "some/package/compute_loss"
312-
// }
313-
// ...
314-
// }
315260
message SignatureDef {
316261
// Named input parameters.
317262
map<string, TensorInfo> inputs = 1;
318263
// Named output parameters.
319264
map<string, TensorInfo> outputs = 2;
320-
// Extensible method_name information enabling third-party users to mark a
321-
// SignatureDef as supporting a particular method. This enables producers and
322-
// consumers of SignatureDefs, e.g. a model definition library and a serving
323-
// library to have a clear hand-off regarding the semantics of a computation.
265+
// Deprecated: TensorFlow 2 always sets this to a fixed value;
266+
// open-source TF Serving stopped checking by default since release 2.4.
324267
//
325-
// Note that multiple SignatureDefs in a single MetaGraphDef may have the same
326-
// method_name. This is commonly used to support multi-headed computation,
327-
// where a single graph computation may return multiple results.
268+
// In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
269+
// supporting a particular method. Multiple SignatureDefs in a single
270+
// MetaGraphDef could have the same method_name (e.g., to support multi-headed
271+
// computation).
328272
string method_name = 3;
329273
// Named input to corresponding default values if any.
330274
map<string, TensorProto> defaults = 4;

tensorboard/compat/proto/resource_handle.proto

+2
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,9 @@ message ResourceHandleProto {
3434

3535
// Protocol buffer representing a pair of (data type, tensor shape).
3636
message DtypeAndShape {
37+
// Data type of the tensor.
3738
DataType dtype = 1;
39+
// Shape of the tensor.
3840
TensorShapeProto shape = 2;
3941
}
4042

tensorboard/compat/proto/rewriter_config.proto

+2-2
Original file line numberDiff line numberDiff line change
@@ -102,8 +102,8 @@ message RewriterConfig {
102102
// Enable the swap of kernel implementations based on the device placement
103103
// (default is ON).
104104
Toggle implementation_selector = 22;
105-
// Optimize data types for CUDA (default is OFF).
106-
// This will try to use float16 on GPU which is faster.
105+
// Optimize data types for CUDA/oneDNN (default is OFF).
106+
// This will try to use float16 on GPU/CPU which is faster.
107107
// Note that this can change the numerical stability of the graph and may
108108
// require the use of loss scaling to maintain model convergence.
109109
Toggle auto_mixed_precision = 23;

tensorboard/compat/proto/tensor.proto

+1
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framewo
1414

1515
// Protocol buffer representing a tensor.
1616
message TensorProto {
17+
// Data type of the tensor.
1718
DataType dtype = 1;
1819

1920
// Shape of the tensor. TODO(touts): sort out the 0-rank issues.
80 Bytes
Binary file not shown.

tensorboard/data/server/tensorboard.pb.rs

+3
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)