[TF] [NFC] Replace absl::optional w/ std::optional in SE and compiler/
PiperOrigin-RevId: 452781234
Committed by: TensorFlower Gardener
Parent: fd23e69082
Commit: 0a723ddac4
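Reviewer note: when Abseil is built in C++17 mode it defines ABSL_USES_STD_OPTIONAL, under which absl::optional<T> is an alias for std::optional<T> (and absl::nullopt for std::nullopt), so this rename is spelling-only, as the [NFC] tag asserts. A minimal sketch of that equivalence follows; the MaybeAnswer helper is a hypothetical illustration, not code from this change:

    #include <optional>
    #include <type_traits>

    #include "absl/types/optional.h"

    // Under C++17 builds, absl::optional<T> and std::optional<T> name the
    // same type, so the two spellings may be mixed freely during migration.
    static_assert(std::is_same_v<absl::optional<int>, std::optional<int>>,
                  "absl::optional should alias std::optional in C++17 builds");

    // Hypothetical helper using the std:: spellings this commit adopts.
    std::optional<int> MaybeAnswer(bool have_answer) {
      if (!have_answer) return std::nullopt;  // same value as absl::nullopt
      return 42;
    }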
@@ -51,7 +51,7 @@ class BatchMatMulOp : public XlaOpKernel {
  private:
   bool adj_x_;
   bool adj_y_;
-  absl::optional<xla::PrimitiveType> preferred_element_type_;
+  std::optional<xla::PrimitiveType> preferred_element_type_;
 };

 REGISTER_XLA_OP(Name("BatchMatMul"), BatchMatMulOp);
@@ -160,7 +160,7 @@ Status XlaGatherWithBatchDimsOpImpl(XlaOpKernelContext* context,
   auto indices = context->Input(1);
   auto indices_shape = context->InputShape(1);

-  absl::optional<int64_t> axis;
+  std::optional<int64_t> axis;
   if (context->num_inputs() == 3) {
     const TensorShape axis_shape = context->InputShape(2);
     if (!TensorShapeUtils::IsScalar(axis_shape)) {
@@ -42,9 +42,9 @@ absl::InlinedVector<int, 5> ConvertCompileTimeConstArgumentsToConst(
       // If we can infer the constant values of an inner computation's argument,
       // replace them with constants. If that fails, we fallback to infer the
       // bounds of the argument.
-      StatusOr<absl::optional<Tensor>> maybe_constant =
+      StatusOr<std::optional<Tensor>> maybe_constant =
           expression.ResolveConstant(ctx->compiler()->client());
-      StatusOr<absl::optional<Tensor>> bounds =
+      StatusOr<std::optional<Tensor>> bounds =
           expression.ResolveConstant(ctx->compiler()->client(), false,
                                      xla::ValueInferenceMode::kUpperBound);
       if ((maybe_constant.ok() && maybe_constant->has_value()) ||
@@ -62,7 +62,7 @@ class ResizeBilinearGradOp : public XlaOpKernel {
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
   // Fallback light outside compilation kernel for the option combination we do
   // not support.
-  absl::optional<CallTfKernelOp> fallback_tf_kernel_;
+  std::optional<CallTfKernelOp> fallback_tf_kernel_;
 #endif
 };

@@ -190,7 +190,7 @@ class MaxPoolOp : public PoolingOp {
     // For VECT_C max-pool ops, transpose to plain NCHW, do the max-pool, and
     // transpose back. This isn't necessarily the most efficient algorithm, but
     // it's ok for starters.
-    absl::optional<int64_t> vect_width;
+    std::optional<int64_t> vect_width;
     if (data_format_ == FORMAT_NCHW_VECT_C) {
       vect_width = input_shape->dimensions().back();
       input = xla::Collapse(xla::Transpose(input, {0, 1, 4, 2, 3}), {1, 2});
@@ -42,7 +42,7 @@ class ShardingOp : public XlaOpKernel {
       // The builder might create a broadcast from a constant, so we clear
       // sharding for the input.
       xla::XlaScopedShardingAssignment no_sharding(ctx->builder(),
-                                                   absl::nullopt);
+                                                   std::nullopt);
       input = ctx->Input(0);
     }
     auto shape_or = ctx->builder()->GetShape(input);
@@ -174,8 +174,8 @@ class UniqueOpBase : public XlaOpKernel {
     sort_keys.push_back(iota);
     sort_types.push_back(xla::S32);

-    std::vector<absl::optional<xla::XlaOp (*)(xla::XlaOp, xla::XlaOp,
-                                              absl::Span<const int64_t>)>>
+    std::vector<std::optional<xla::XlaOp (*)(xla::XlaOp, xla::XlaOp,
+                                             absl::Span<const int64_t>)>>
         generators(sort_types.size(), xla::LtTotalOrder);
     auto lt_chain = xla::CreateScalarComparisonComputation(
         "UniqueV2Lt", sort_types, generators, ctx->builder());
@@ -39,7 +39,7 @@ class XlaConvOp : public XlaOpKernel {
     OP_REQUIRES(context,
                 precision_config_.ParsePartialFromString(precision_config_attr),
                 errors::InvalidArgument("Error parsing precision config."));
-    preferred_element_type_ = absl::nullopt;
+    preferred_element_type_ = std::nullopt;
     batch_group_count_ = 1;
   }

@@ -85,7 +85,7 @@ class XlaConvOp : public XlaOpKernel {
   }

  protected:
-  absl::optional<xla::PrimitiveType> preferred_element_type_;
+  std::optional<xla::PrimitiveType> preferred_element_type_;
   int64_t batch_group_count_;

  private:
@@ -42,7 +42,7 @@ class XlaDotOp : public XlaOpKernel {
         context,
         precision_config_.ParsePartialFromString(precision_config_attr),
         errors::InvalidArgument("Error parsing convolution dimension numbers"));
-    preferred_element_type_ = absl::nullopt;
+    preferred_element_type_ = std::nullopt;
   }

   void Compile(XlaOpKernelContext* context) override {
@@ -58,7 +58,7 @@ class XlaDotOp : public XlaOpKernel {
   }

  protected:
-  absl::optional<xla::PrimitiveType> preferred_element_type_;
+  std::optional<xla::PrimitiveType> preferred_element_type_;

  private:
   xla::DotDimensionNumbers dnums_;
@@ -54,7 +54,7 @@ class NoncopyableBuffer {

   // Allocates an owning buffer and initializes it with the specified data. Size
   // is specified in number of uint32's.
-  NoncopyableBuffer(size_t size_in_u32s, absl::optional<uint32_t> value,
+  NoncopyableBuffer(size_t size_in_u32s, std::optional<uint32_t> value,
                     BufferAllocator allocator = DefaultAllocator)
       : NoncopyableBuffer(size_in_u32s * sizeof(uint32_t), allocator) {
 #ifndef MEMORY_SANITIZER
@@ -49,7 +49,7 @@ class TpuExecutable : public xla::TpuExecutableInterface {
       const ServiceExecutableRunOptions& run_options,
       absl::Span<const stream_executor::DeviceMemoryBase> arguments,
       stream_executor::DeviceMemoryBase result,
-      absl::optional<stream_executor::DeviceMemoryBase>
+      std::optional<stream_executor::DeviceMemoryBase>
           cross_program_prefetch_addr) override {
     LOG(FATAL) << "LoadProgramAndEnqueueToStream unimplemented";
   }
@@ -76,7 +76,7 @@ TpuExecutableInterface::AllocateOutputMemoryWithInputReuse(

   TF_RETURN_IF_ERROR(alias_config.ForEachAliasWithStatus(
       [&](const ShapeIndex& output_index,
-          absl::optional<HloInputOutputAliasConfig::Alias> alias) {
+          std::optional<HloInputOutputAliasConfig::Alias> alias) {
         if (alias && alias->must_alias()) {
           VLOG(1) << alias->ToString();
           const MaybeOwningDeviceMemory& original_input =
@@ -121,7 +121,7 @@ TpuExecutableInterface::AllocateOutputMemoryWithInputReuse(
             result_index.ToString());
       }

-      absl::optional<HloInputOutputAliasConfig::Alias> alias =
+      std::optional<HloInputOutputAliasConfig::Alias> alias =
           alias_config.GetAliasedParameter(result_index);
       if (alias) {
         TF_RET_CHECK(alias->parameter_number < arguments->size());
@@ -204,7 +204,7 @@ StatusOr<ExecutionOutput> TpuExecutableInterface::ExecuteAsyncOnStream(
           run_options->run_options().host_to_device_stream()));

   // Address of the buffer in TPU memory that is being speculated.
-  absl::optional<se::DeviceMemoryBase> cross_program_prefetch_addr;
+  std::optional<se::DeviceMemoryBase> cross_program_prefetch_addr;
   if (hlo_module_) {
     for (const auto& prefetch : hlo_module_->CrossProgramPrefetches()) {
       const auto& parameter = prefetch.first;
@@ -73,7 +73,7 @@ class TpuExecutableInterface : public Executable {
       const ServiceExecutableRunOptions& run_options,
      absl::Span<const stream_executor::DeviceMemoryBase> arguments,
       stream_executor::DeviceMemoryBase result,
-      absl::optional<stream_executor::DeviceMemoryBase>
+      std::optional<stream_executor::DeviceMemoryBase>
           cross_program_prefetch_addr) = 0;

   virtual absl::string_view fingerprint() const = 0;
@@ -220,7 +220,7 @@ bool TpuExecutor::DeviceMemoryUsage(int64_t* free, int64_t* total) const {
   return false;
 }

-absl::optional<stream_executor::AllocatorStats>
+std::optional<stream_executor::AllocatorStats>
 TpuExecutor::GetAllocatorStats() {
   SE_AllocatorStats c_stats;
   if (tpu::ExecutorApiFn()->TpuExecutor_GetAllocatorStatsFn(executor_,
@@ -101,7 +101,7 @@ class TpuExecutor : public tensorflow::tpu::TpuExecutorInterface {
   Status EnqueueInfeed(int32_t infeed_queue_index,
                        absl::Span<const uint8> bytes);

-  absl::optional<stream_executor::AllocatorStats> GetAllocatorStats() override;
+  std::optional<stream_executor::AllocatorStats> GetAllocatorStats() override;

   tpu::TpuCoreLocationExternal GetCoreLocationExternal() const override;
@@ -38,7 +38,7 @@ Status TpuOpExecutable::LoadProgramAndEnqueueToStream(
     const xla::ServiceExecutableRunOptions& run_options,
     absl::Span<const se::DeviceMemoryBase> arguments,
     se::DeviceMemoryBase result,
-    absl::optional<se::DeviceMemoryBase> cross_program_prefetch_addr) {
+    std::optional<se::DeviceMemoryBase> cross_program_prefetch_addr) {
   SE_DeviceMemoryBase* arguments_bases = nullptr;
   if (!arguments.empty()) {
     arguments_bases = new SE_DeviceMemoryBase[arguments.size()];
@@ -53,7 +53,7 @@ class TpuOpExecutable : public xla::TpuExecutableInterface {
       const xla::ServiceExecutableRunOptions& run_options,
       absl::Span<const stream_executor::DeviceMemoryBase> arguments,
       stream_executor::DeviceMemoryBase result,
-      absl::optional<stream_executor::DeviceMemoryBase>
+      std::optional<stream_executor::DeviceMemoryBase>
          cross_program_prefetch_addr) override;

   xla::Shape HostShapeToDeviceShape(const xla::Shape& host_shape) override;