From b1082f97e5d537191f51eba5331322a98ee5154c Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower" <gardener@tensorflow.org>
Date: Sat, 20 Dec 2025 13:45:28 -0800
Subject: [PATCH] Automated Code Change

PiperOrigin-RevId: 847189651
---
 tensorflow/core/kernels/batching_util/concat_split_util.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tensorflow/core/kernels/batching_util/concat_split_util.h b/tensorflow/core/kernels/batching_util/concat_split_util.h
index b5354be35c7..4ac0100fbdf 100644
--- a/tensorflow/core/kernels/batching_util/concat_split_util.h
+++ b/tensorflow/core/kernels/batching_util/concat_split_util.h
@@ -81,7 +81,7 @@ absl::Status Concat(OpKernelContext* context,
     (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
   if (std::is_same<Device, GPUDevice>::value) {
     ConcatGPU<T>(context, inputs_flat, output, &output_flat);
-    return OkStatus();
+    return absl::OkStatus();
   }
 #endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
   ConcatCPU<T>(context->device(), inputs_flat, &output_flat);
@@ -198,9 +198,9 @@ absl::Status SplitCPU(OpKernelContext* context, const Tensor& input,
 
 // Handles the general case, on GPU.
 template <typename T>
-Status SplitGPU(OpKernelContext* context, const Tensor& input,
-                const gtl::ArraySlice<int64_t>& sizes,
-                std::vector<Tensor>* outputs) {
+absl::Status SplitGPU(OpKernelContext* context, const Tensor& input,
+                      const absl::Span<const int64_t>& sizes,
+                      std::vector<Tensor>* outputs) {
   // TODO(olston, apassos): Implement this.
   LOG(FATAL) << "Not yet implemented";  // Crash ok
 }