diff --git a/aten/src/ATen/LegacyBatchedTensorImpl.h b/aten/src/ATen/LegacyBatchedTensorImpl.h index 798e3535af3..f051e7b1f65 100644 --- a/aten/src/ATen/LegacyBatchedTensorImpl.h +++ b/aten/src/ATen/LegacyBatchedTensorImpl.h @@ -144,7 +144,7 @@ inline std::bitset<kVmapNumLevels> createVmapLevelsBitset(BatchDimsRef bdims) { } inline std::ostream& operator<<(std::ostream& out, const BatchDim& bdim) { - out << "(lvl=" << bdim.level() << ", dim=" << bdim.dim() << ")"; + out << "(lvl=" << bdim.level() << ", dim=" << bdim.dim() << ')'; return out; } diff --git a/aten/src/ATen/TensorIndexing.cpp b/aten/src/ATen/TensorIndexing.cpp index 1fa85268665..8618a67259c 100644 --- a/aten/src/ATen/TensorIndexing.cpp +++ b/aten/src/ATen/TensorIndexing.cpp @@ -9,7 +9,7 @@ namespace indexing { const EllipsisIndexType Ellipsis = EllipsisIndexType(); std::ostream& operator<<(std::ostream& stream, const Slice& slice) { - stream << slice.start() << ":" << slice.stop() << ":" << slice.step(); + stream << slice.start() << ':' << slice.stop() << ':' << slice.step(); return stream; } @@ -31,12 +31,12 @@ std::ostream& operator<<(std::ostream& stream, const TensorIndex& tensor_index) } std::ostream& operator<<(std::ostream& stream, const std::vector<TensorIndex>& tensor_indices) { - stream << "("; + stream << '('; for (const auto i : c10::irange(tensor_indices.size())) { stream << tensor_indices[i]; if (i < tensor_indices.size() - 1) stream << ", "; } - stream << ")"; + stream << ')'; return stream; } diff --git a/aten/src/ATen/TensorNames.cpp b/aten/src/ATen/TensorNames.cpp index bff12aa8de6..ac6857b95c1 100644 --- a/aten/src/ATen/TensorNames.cpp +++ b/aten/src/ATen/TensorNames.cpp @@ -113,7 +113,7 @@ void TensorNames::checkUnique(const char* op_name) const { std::ostream& operator<<(std::ostream& out, const TensorName& tensorname) { out << tensorname.name_ << " (index "; out << tensorname.origin_idx_ << " of "; - out << tensorname.origin_ << ")"; + out << tensorname.origin_ << ')'; return out; } diff --git a/aten/src/ATen/TensorUtils.cpp b/aten/src/ATen/TensorUtils.cpp index 8236751679f..2752ff792e4 100644 --- a/aten/src/ATen/TensorUtils.cpp +++ b/aten/src/ATen/TensorUtils.cpp @@ -13,9 +13,9 @@ std::ostream& operator<<(std::ostream & out, const TensorGeometryArg& t) { if (t.pos == 0) { // 0 is distinguished; it usually indicates 'self' or the return // tensor - out << "'" << t.name << "'"; + out << '\'' << t.name << '\''; } else { - out << "argument #" << t.pos << " '" << t.name << "'"; + out << "argument #" << t.pos << " '" << t.name << '\''; } return out; } @@ -154,7 +154,7 @@ void checkSameGPU(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) { oss << "Tensor for " << t2 << " is on CPU, "; } oss << "but expected " << ((!t1->is_cpu() && !t2->is_cpu()) ?
"them" : "it") - << " to be on GPU (while checking arguments for " << c << ")"; + << " to be on GPU (while checking arguments for " << c << ')'; TORCH_CHECK(false, oss.str()); } TORCH_CHECK( @@ -199,7 +199,7 @@ void checkScalarTypes(CheckedFrom c, const TensorArg& t, i++; } oss << "; but got " << t->toString() - << " instead (while checking arguments for " << c << ")"; + << " instead (while checking arguments for " << c << ')'; TORCH_CHECK(false, oss.str()); } } diff --git a/aten/src/ATen/Version.cpp b/aten/src/ATen/Version.cpp index 7239f357fdd..a6335d9e113 100644 --- a/aten/src/ATen/Version.cpp +++ b/aten/src/ATen/Version.cpp @@ -43,8 +43,8 @@ std::string get_mkldnn_version() { // https://github.com/intel/ideep/issues/29 { const dnnl_version_t* ver = dnnl_version(); - ss << "Intel(R) MKL-DNN v" << ver->major << "." << ver->minor << "." << ver->patch - << " (Git Hash " << ver->hash << ")"; + ss << "Intel(R) MKL-DNN v" << ver->major << '.' << ver->minor << '.' << ver->patch + << " (Git Hash " << ver->hash << ')'; } #else ss << "MKLDNN not found"; @@ -81,7 +81,7 @@ std::string get_openmp_version() { break; } if (ver_str) { - ss << " (a.k.a. OpenMP " << ver_str << ")"; + ss << " (a.k.a. OpenMP " << ver_str << ')'; } } #else @@ -135,38 +135,38 @@ std::string show_config() { #if defined(__GNUC__) { - ss << " - GCC " << __GNUC__ << "." << __GNUC_MINOR__ << "\n"; + ss << " - GCC " << __GNUC__ << '.' << __GNUC_MINOR__ << '\n'; } #endif #if defined(__cplusplus) { - ss << " - C++ Version: " << __cplusplus << "\n"; + ss << " - C++ Version: " << __cplusplus << '\n'; } #endif #if defined(__clang_major__) { - ss << " - clang " << __clang_major__ << "." << __clang_minor__ << "." << __clang_patchlevel__ << "\n"; + ss << " - clang " << __clang_major__ << '.' << __clang_minor__ << '.' << __clang_patchlevel__ << '\n'; } #endif #if defined(_MSC_VER) { - ss << " - MSVC " << _MSC_FULL_VER << "\n"; + ss << " - MSVC " << _MSC_FULL_VER << '\n'; } #endif #if AT_MKL_ENABLED() - ss << " - " << get_mkl_version() << "\n"; + ss << " - " << get_mkl_version() << '\n'; #endif #if AT_MKLDNN_ENABLED() - ss << " - " << get_mkldnn_version() << "\n"; + ss << " - " << get_mkldnn_version() << '\n'; #endif #ifdef _OPENMP - ss << " - " << get_openmp_version() << "\n"; + ss << " - " << get_openmp_version() << '\n'; #endif #if AT_BUILD_WITH_LAPACK() @@ -183,7 +183,7 @@ std::string show_config() { ss << " - Cross compiling on MacOSX\n"; #endif - ss << " - "<< used_cpu_capability() << "\n"; + ss << " - "<< used_cpu_capability() << '\n'; if (hasCUDA()) { ss << detail::getCUDAHooks().showConfig(); @@ -200,10 +200,10 @@ std::string show_config() { ss << " - Build settings: "; for (const auto& pair : caffe2::GetBuildOptions()) { if (!pair.second.empty()) { - ss << pair.first << "=" << pair.second << ", "; + ss << pair.first << '=' << pair.second << ", "; } } - ss << "\n"; + ss << '\n'; // TODO: do HIP // TODO: do XLA diff --git a/aten/src/ATen/code_template.h b/aten/src/ATen/code_template.h index 2026795fc0a..2cde802dac1 100644 --- a/aten/src/ATen/code_template.h +++ b/aten/src/ATen/code_template.h @@ -209,7 +209,7 @@ struct CodeTemplate { // to indent correctly in the context. 
void emitIndent(std::ostream& out, size_t indent) const { for ([[maybe_unused]] const auto i : c10::irange(indent)) { - out << " "; + out << ' '; } } void emitStringWithIndents( diff --git a/aten/src/ATen/core/Dimname.cpp b/aten/src/ATen/core/Dimname.cpp index c78d554732b..66aa8cb69e1 100644 --- a/aten/src/ATen/core/Dimname.cpp +++ b/aten/src/ATen/core/Dimname.cpp @@ -10,7 +10,7 @@ std::ostream& operator<<(std::ostream& out, const Dimname& dimname) { if (dimname.type() == NameType::WILDCARD) { out << "None"; } else { - out << "'" << dimname.symbol().toUnqualString() << "'"; + out << '\'' << dimname.symbol().toUnqualString() << '\''; } return out; } diff --git a/aten/src/ATen/core/Range.cpp b/aten/src/ATen/core/Range.cpp index 06a79a9c7d0..b5f4c7b6f85 100644 --- a/aten/src/ATen/core/Range.cpp +++ b/aten/src/ATen/core/Range.cpp @@ -5,7 +5,7 @@ namespace at { std::ostream& operator<<(std::ostream& out, const Range& range) { - out << "Range[" << range.begin << ", " << range.end << "]"; + out << "Range[" << range.begin << ", " << range.end << ']'; return out; } diff --git a/aten/src/ATen/core/Tensor.cpp b/aten/src/ATen/core/Tensor.cpp index c5f887f096c..090e77e7037 100644 --- a/aten/src/ATen/core/Tensor.cpp +++ b/aten/src/ATen/core/Tensor.cpp @@ -71,7 +71,7 @@ void TensorBase::enforce_invariants() { void TensorBase::print() const { if (defined()) { - std::cerr << "[" << toString() << " " << sizes() << "]" << '\n'; + std::cerr << '[' << toString() << ' ' << sizes() << ']' << '\n'; } else { std::cerr << "[UndefinedTensor]" << '\n'; } diff --git a/aten/src/ATen/core/Vitals.cpp b/aten/src/ATen/core/Vitals.cpp index 1cfc720aca5..ac1ee45d583 100644 --- a/aten/src/ATen/core/Vitals.cpp +++ b/aten/src/ATen/core/Vitals.cpp @@ -9,8 +9,8 @@ APIVitals VitalsAPI; std::ostream& operator<<(std::ostream& os, TorchVital const& tv) { for (const auto& m : tv.attrs) { - os << "[TORCH_VITAL] " << tv.name << "." << m.first << "\t\t " - << m.second.value << "\n"; + os << "[TORCH_VITAL] " << tv.name << '.' 
<< m.first << "\t\t " + << m.second.value << '\n'; } return os; } diff --git a/aten/src/ATen/core/alias_info.h b/aten/src/ATen/core/alias_info.h index bf0ff6ee72d..6a3335c328b 100644 --- a/aten/src/ATen/core/alias_info.h +++ b/aten/src/ATen/core/alias_info.h @@ -100,18 +100,18 @@ inline bool operator==(const AliasInfo& lhs, const AliasInfo& rhs) { // this does match the way things are represented in the schema inline std::ostream& operator<<(std::ostream& out, const AliasInfo& aliasInfo) { - out << "("; + out << '('; bool first = true; for (const auto& set : aliasInfo.beforeSets()) { if (first) { first = false; } else { - out << "|"; + out << '|'; } out << set.toUnqualString(); } if (aliasInfo.isWrite()) { - out << "!"; + out << '!'; } if (aliasInfo.beforeSets() != aliasInfo.afterSets()) { out << " -> "; @@ -120,12 +120,12 @@ inline std::ostream& operator<<(std::ostream& out, const AliasInfo& aliasInfo) { if (first) { first = false; } else { - out << "|"; + out << '|'; } out << set.toUnqualString(); } } - out << ")"; + out << ')'; return out; } } // namespace c10 diff --git a/aten/src/ATen/core/blob.h b/aten/src/ATen/core/blob.h index 251da65e089..617d6a982ab 100644 --- a/aten/src/ATen/core/blob.h +++ b/aten/src/ATen/core/blob.h @@ -198,7 +198,7 @@ inline void swap(Blob& lhs, Blob& rhs) noexcept { } inline std::ostream& operator<<(std::ostream& out, const Blob& v) { - return out << "Blob[" << v.TypeName() << "]"; + return out << "Blob[" << v.TypeName() << ']'; } } // namespace caffe2 diff --git a/aten/src/ATen/core/class_type.cpp b/aten/src/ATen/core/class_type.cpp index 800d9ea0ef9..a65124e8097 100644 --- a/aten/src/ATen/core/class_type.cpp +++ b/aten/src/ATen/core/class_type.cpp @@ -456,8 +456,8 @@ bool ClassType::isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const { *why_not << "Method on class '" << repr_str() << "' (1) is not compatible with interface '" << rhs.repr_str() << "' (2)\n" - << " (1) " << self_method->getSchema() << "\n" - << " (2) " << schema << "\n"; + << " (1) " << self_method->getSchema() << '\n' + << " (2) " << schema << '\n'; } return false; } diff --git a/aten/src/ATen/core/class_type.h b/aten/src/ATen/core/class_type.h index ea537400ef7..f6f6bade9c9 100644 --- a/aten/src/ATen/core/class_type.h +++ b/aten/src/ATen/core/class_type.h @@ -100,7 +100,7 @@ struct TORCH_API ClassType : public NamedType { std::string repr_str() const override { std::stringstream ss; ss << str() - << " (of Python compilation unit at: " << compilation_unit().get() << ")"; + << " (of Python compilation unit at: " << compilation_unit().get() << ')'; return ss.str(); } diff --git a/aten/src/ATen/core/dispatch/DispatchKeyExtractor.cpp b/aten/src/ATen/core/dispatch/DispatchKeyExtractor.cpp index 9180d0d19e6..369bd374747 100644 --- a/aten/src/ATen/core/dispatch/DispatchKeyExtractor.cpp +++ b/aten/src/ATen/core/dispatch/DispatchKeyExtractor.cpp @@ -58,12 +58,12 @@ std::string DispatchKeyExtractor::dumpState() const { std::ostringstream oss; for (const auto i : c10::irange(c10::utils::bitset::NUM_BITS())) { if (dispatch_arg_indices_reverse_.get(i)) { - oss << "1"; + oss << '1'; } else { - oss << "0"; + oss << '0'; } } - oss << " " << nonFallthroughKeys_ << "\n"; + oss << ' ' << nonFallthroughKeys_ << '\n'; return oss.str(); } diff --git a/aten/src/ATen/core/dispatch/Dispatcher.cpp b/aten/src/ATen/core/dispatch/Dispatcher.cpp index afcaf51f231..5facca30a54 100644 --- a/aten/src/ATen/core/dispatch/Dispatcher.cpp +++ b/aten/src/ATen/core/dispatch/Dispatcher.cpp @@ -69,8 +69,8 @@ private: void 
_print_dispatch_trace(const std::string& label, const std::string& op_name, const DispatchKeySet& dispatchKeySet) { auto nesting_value = dispatch_trace_nesting_value(); - for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " "; - std::cerr << label << " op=[" << op_name << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl; + for (int64_t i = 0; i < nesting_value; ++i) std::cerr << ' '; + std::cerr << label << " op=[" << op_name << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << ']' << std::endl; } } // namespace detail diff --git a/aten/src/ATen/core/dispatch/OperatorEntry.cpp b/aten/src/ATen/core/dispatch/OperatorEntry.cpp index 928474ec333..e2627354971 100644 --- a/aten/src/ATen/core/dispatch/OperatorEntry.cpp +++ b/aten/src/ATen/core/dispatch/OperatorEntry.cpp @@ -570,7 +570,7 @@ void OperatorEntry::checkInvariants() const { std::string OperatorEntry::listAllDispatchKeys() const { std::ostringstream str; - str << "["; + str << '['; bool has_kernels = false; for (auto k : allDispatchKeysInFullSet()) { @@ -584,7 +584,7 @@ std::string OperatorEntry::listAllDispatchKeys() const { str << k; has_kernels = true; } - str << "]"; + str << ']'; return str.str(); } @@ -683,12 +683,12 @@ void OperatorEntry::setReportErrorCallback_(std::unique_ptr c // This WON'T report backend fallbacks. std::string OperatorEntry::dumpState() const { std::ostringstream oss; - oss << "name: " << name_ << "\n"; + oss << "name: " << name_ << '\n'; if (schema_) { - oss << "schema: " << schema_->schema << "\n"; - oss << "debug: " << schema_->debug << "\n"; + oss << "schema: " << schema_->schema << '\n'; + oss << "debug: " << schema_->debug << '\n'; oss << "alias analysis kind: " << toString(schema_->schema.aliasAnalysis()) - << (schema_->schema.isDefaultAliasAnalysisKind() ? " (default)" : "") << "\n"; + << (schema_->schema.isDefaultAliasAnalysisKind() ? " (default)" : "") << '\n'; } else { oss << "schema: (none)\n"; } diff --git a/aten/src/ATen/core/function_schema.cpp b/aten/src/ATen/core/function_schema.cpp index 6587af0f9cc..ffccbe282dd 100644 --- a/aten/src/ATen/core/function_schema.cpp +++ b/aten/src/ATen/core/function_schema.cpp @@ -7,7 +7,7 @@ namespace c10 { void FunctionSchema::dump() const { - std::cout << *this << "\n"; + std::cout << *this << '\n'; } const std::vector& FunctionSchema::getCorrectList(SchemaArgType type) const { @@ -210,9 +210,9 @@ std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) { out << schema.name(); if (!schema.overload_name().empty()) { - out << "." << schema.overload_name(); + out << '.' 
<< schema.overload_name(); } - out << "("; + out << '('; bool seen_kwarg_only = false; for (const auto i : c10::irange(schema.arguments().size())) { @@ -273,7 +273,7 @@ std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) { } if (need_paren) { - out << "("; + out << '('; } for (const auto i : c10::irange(returns.size())) { if (i > 0) { @@ -288,7 +288,7 @@ std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) { out << "..."; } if (need_paren) { - out << ")"; + out << ')'; } return out; } @@ -471,7 +471,7 @@ bool FunctionSchema::isForwardCompatibleWith( if (!arguments().at(i).isForwardCompatibleWith(old.arguments().at(i))) { if (why_not) { why_not - << "'" << arguments().at(i).name() << "'" + << '\'' << arguments().at(i).name() << '\'' << " is not forward compatible with the older version of the schema"; } return false; @@ -511,7 +511,7 @@ bool FunctionSchema::isForwardCompatibleWith( .isForwardCompatibleWith(old.arguments().at(i))) { if (why_not) { why_not << "Out argument '" - << "'" << arguments().at(i).name() + << '\'' << arguments().at(i).name() << " is not FC with the older version of the schema"; } return false; diff --git a/aten/src/ATen/core/function_schema.h b/aten/src/ATen/core/function_schema.h index c3e1520dc98..f349567c264 100644 --- a/aten/src/ATen/core/function_schema.h +++ b/aten/src/ATen/core/function_schema.h @@ -571,7 +571,7 @@ inline std::ostream& operator<<(std::ostream& out, const Argument& arg) { if (arg.N()) { N = std::to_string(*arg.N()); } - out << "[" << N << "]"; + out << '[' << N << ']'; } else { out << unopt_type->str(); } @@ -582,15 +582,15 @@ inline std::ostream& operator<<(std::ostream& out, const Argument& arg) { } if (is_opt) { - out << "?"; + out << '?'; } if (!arg.name().empty()) { - out << " " << arg.name(); + out << ' ' << arg.name(); } if (arg.default_value()) { - out << "="; + out << '='; if ((type->kind() == c10::TypeKind::StringType || unopt_type->kind() == c10::TypeKind::StringType) && arg.default_value().value().isString()) { diff --git a/aten/src/ATen/core/ivalue.cpp b/aten/src/ATen/core/ivalue.cpp index 1ff8dd04109..6e4ee82ab11 100644 --- a/aten/src/ATen/core/ivalue.cpp +++ b/aten/src/ATen/core/ivalue.cpp @@ -66,7 +66,7 @@ bool operator==(const ivalue::Tuple& lhs, const ivalue::Tuple& rhs) { } std::ostream& operator<<(std::ostream& out, const ivalue::EnumHolder& v) { - out << v.qualifiedClassName() << "." << v.name(); + out << v.qualifiedClassName() << '.' 
<< v.name(); return out; } @@ -526,7 +526,7 @@ std::ostream& printMaybeAnnotatedList( !elementTypeCanBeInferredFromMembers(list_elem_type)) { out << "annotate(" << the_list.type()->annotation_str() << ", "; printList(out, the_list.toListRef(), "[", "]", formatter); - out << ")"; + out << ')'; return out; } else { return printList(out, the_list.toListRef(), "[", "]", formatter); } @@ -538,7 +538,7 @@ std::ostream& printDict( std::ostream& out, const Dict<IValue, IValue>& v, const IValueFormatter& formatter) { - out << "{"; + out << '{'; bool first = true; for (const auto& pair : v) { @@ -552,7 +552,7 @@ std::ostream& printDict( first = false; } - out << "}"; + out << '}'; return out; } } @@ -565,8 +565,8 @@ static std::ostream& printMaybeAnnotatedDict( auto value_type = the_dict.type()->castRaw<DictType>()->getValueType(); if (the_dict.toGenericDict().empty() || !elementTypeCanBeInferredFromMembers(value_type)) { - out << "annotate(" << the_dict.type()->annotation_str() << ","; - printDict(out, the_dict.toGenericDict(), formatter) << ")"; + out << "annotate(" << the_dict.type()->annotation_str() << ','; + printDict(out, the_dict.toGenericDict(), formatter) << ')'; } else { return printDict(out, the_dict.toGenericDict(), formatter); } @@ -577,7 +577,7 @@ static std::ostream& printComplex(std::ostream & out, const IValue & v) { c10::complex<double> d = v.toComplexDouble(); IValue real(d.real()), imag(std::abs(d.imag())); auto sign = d.imag() >= 0 ? '+' : '-'; - return out << real << sign << imag << "j"; + return out << real << sign << imag << 'j'; } std::ostream& IValue::repr( @@ -605,9 +605,9 @@ std::ostream& IValue::repr( if (static_cast<double>(i) == d) { // -0.0 (signed zero) needs to be parsed as -0. if (i == 0 && std::signbit(d)) { - return out << "-" << i << "."; + return out << '-' << i << '.'; } - return out << i << "."; + return out << i << '.'; } } auto orig_prec = out.precision(); @@ -643,20 +643,20 @@ std::ostream& IValue::repr( device_stream << v.toDevice(); out << "torch.device("; c10::printQuotedString(out, device_stream.str()); - return out << ")"; + return out << ')'; } case IValue::Tag::Generator: { auto generator = v.toGenerator(); out << "torch.Generator(device="; c10::printQuotedString(out, generator.device().str()); - out << ", seed=" << generator.current_seed() << ")"; + out << ", seed=" << generator.current_seed() << ')'; return out; } case IValue::Tag::GenericDict: return printMaybeAnnotatedDict(out, v, formatter); case IValue::Tag::Enum: { auto enum_holder = v.toEnumHolder(); - return out << enum_holder->qualifiedClassName() << "." << + return out << enum_holder->qualifiedClassName() << '.' << enum_holder->name(); } case IValue::Tag::Object: { @@ -801,7 +801,7 @@ std::ostream& operator<<(std::ostream & out, const IValue & v) { if (c == FP_NORMAL || c == FP_ZERO) { int64_t i = static_cast<int64_t>(d); if (static_cast<double>(i) == d) { - return out << i << "."; + return out << i << '.'; } } auto orig_prec = out.precision(); @@ -852,7 +852,7 @@ std::ostream& operator<<(std::ostream & out, const IValue & v) { return printDict(out, v.toGenericDict(), formatter); case IValue::Tag::PyObject: { auto py_obj = v.toPyObject(); - return out << "<PyObject at " << py_obj << ">"; + return out << "<PyObject at " << py_obj << '>'; } case IValue::Tag::Generator: return out << "Generator"; @@ -862,22 +862,22 @@ std::ostream& operator<<(std::ostream & out, const IValue & v) { // TODO we should attempt to call __str__ if the object defines it.
auto obj = v.toObject(); // print this out the way python would do it - return out << "<" << obj->name() << " object at " << obj.get() << ">"; + return out << '<' << obj->name() << " object at " << obj.get() << '>'; } case IValue::Tag::Enum: { auto enum_holder = v.toEnumHolder(); - return out << "Enum<" << enum_holder->unqualifiedClassName() << "." << - enum_holder->name() << ">"; + return out << "Enum<" << enum_holder->unqualifiedClassName() << '.' << + enum_holder->name() << '>'; } } - return out << ""; + return out << " ivalue::Object::type() const { @@ -1050,7 +1050,7 @@ c10::intrusive_ptr ivalue::Object::deepcopy( std::stringstream err; err << "Cannot serialize custom bound C++ class"; if (auto qualname = type()->name()) { - err << " " << qualname->qualifiedName(); + err << ' ' << qualname->qualifiedName(); } err << ". Please define serialization methods via def_pickle() for " "this class."; diff --git a/aten/src/ATen/core/jit_type.h b/aten/src/ATen/core/jit_type.h index 666d1ade578..535831ea11d 100644 --- a/aten/src/ATen/core/jit_type.h +++ b/aten/src/ATen/core/jit_type.h @@ -211,7 +211,7 @@ struct TORCH_API OptionalType : public UnionType { std::string str() const override { std::stringstream ss; - ss << getElementType()->str() << "?"; + ss << getElementType()->str() << '?'; return ss.str(); } @@ -240,7 +240,7 @@ struct TORCH_API OptionalType : public UnionType { std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override { std::stringstream ss; - ss << "Optional[" << getElementType()->annotation_str(printer) << "]"; + ss << "Optional[" << getElementType()->annotation_str(printer) << ']'; return ss.str(); } }; @@ -906,7 +906,7 @@ struct TORCH_API ListType std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override { std::stringstream ss; - ss << "List[" << getElementType()->annotation_str(printer) << "]"; + ss << "List[" << getElementType()->annotation_str(printer) << ']'; return ss.str(); } }; @@ -946,7 +946,7 @@ struct TORCH_API DictType : public SharedType { std::string str() const override { std::stringstream ss; ss << "Dict(" << getKeyType()->str() << ", " << getValueType()->str() - << ")"; + << ')'; return ss.str(); } @@ -1018,7 +1018,7 @@ struct TORCH_API FutureType std::string str() const override { std::stringstream ss; - ss << "Future(" << getElementType()->str() << ")"; + ss << "Future(" << getElementType()->str() << ')'; return ss.str(); } TypePtr createWithContained( @@ -1041,7 +1041,7 @@ struct TORCH_API FutureType std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override { std::stringstream ss; - ss << "Future[" << getElementType()->annotation_str(printer) << "]"; + ss << "Future[" << getElementType()->annotation_str(printer) << ']'; return ss.str(); } }; @@ -1060,7 +1060,7 @@ struct TORCH_API AwaitType std::string str() const override { std::stringstream ss; - ss << "Await(" << getElementType()->str() << ")"; + ss << "Await(" << getElementType()->str() << ')'; return ss.str(); } TypePtr createWithContained( @@ -1083,7 +1083,7 @@ struct TORCH_API AwaitType std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override { std::stringstream ss; - ss << "Await[" << getElementType()->annotation_str(printer) << "]"; + ss << "Await[" << getElementType()->annotation_str(printer) << ']'; return ss.str(); } }; @@ -1102,7 +1102,7 @@ struct TORCH_API RRefType std::string str() const override { std::stringstream ss; - ss << "RRef(" << getElementType()->str() << ")"; + ss << "RRef(" 
<< getElementType()->str() << ')'; return ss.str(); } TypePtr createWithContained( @@ -1115,7 +1115,7 @@ struct TORCH_API RRefType std::string annotation_str_impl(const TypePrinter& printer = nullptr) const override { std::stringstream ss; - ss << "RRef[" << getElementType()->annotation_str(printer) << "]"; + ss << "RRef[" << getElementType()->annotation_str(printer) << ']'; return ss.str(); } }; diff --git a/aten/src/ATen/core/operator_name.cpp b/aten/src/ATen/core/operator_name.cpp index 43a1fd24749..e55a84a4d30 100644 --- a/aten/src/ATen/core/operator_name.cpp +++ b/aten/src/ATen/core/operator_name.cpp @@ -11,7 +11,7 @@ std::string toString(const OperatorName& opName) { std::ostream& operator<<(std::ostream& os, const OperatorName& opName) { os << opName.name; if (!opName.overload_name.empty()) { - os << "." << opName.overload_name; + os << '.' << opName.overload_name; } return os; } diff --git a/aten/src/ATen/core/tensor_type.cpp b/aten/src/ATen/core/tensor_type.cpp index 9d8080cb8f3..d428aceb3d0 100644 --- a/aten/src/ATen/core/tensor_type.cpp +++ b/aten/src/ATen/core/tensor_type.cpp @@ -65,7 +65,7 @@ VaryingShape VaryingShape::merge(const VaryingShape& other) const { template std::ostream& operator<<(std::ostream& out, const VaryingShape& vs) { - out << "("; + out << '('; if (!vs.size()) { out << "*)"; return out; @@ -79,10 +79,10 @@ std::ostream& operator<<(std::ostream& out, const VaryingShape& vs) { if (v.has_value()) { out << v.value(); } else { - out << "*"; + out << '*'; } } - out << ")"; + out << ')'; return out; } @@ -105,7 +105,7 @@ std::ostream& operator<<( } auto sizes_opt = ss.sizes(); - os << "("; + os << '('; for (size_t i = 0; i < rank_opt.value(); i++) { if (i > 0) { os << ", "; @@ -113,10 +113,10 @@ std::ostream& operator<<( if(sizes_opt.has_value() && sizes_opt.value()[i].is_static()) { os << sizes_opt.value()[i]; } else { - os << "*"; + os << '*'; } } - os << ")"; + os << ')'; return os; } @@ -131,17 +131,17 @@ std::ostream& operator<<(std::ostream& os, const ShapeSymbol& s) { } std::ostream& operator<<(std::ostream& os, const Stride& s) { - os << "{"; + os << '{'; if (s.stride_index_.has_value()) { os << *s.stride_index_; } else { - os << "*"; + os << '*'; } - os << ":"; + os << ':'; if (s.stride_.has_value()) { os << *s.stride_; } else { - os << "*"; + os << '*'; } os << '}'; return os; diff --git a/aten/src/ATen/core/type.cpp b/aten/src/ATen/core/type.cpp index abba4e14583..46dc550b1f3 100644 --- a/aten/src/ATen/core/type.cpp +++ b/aten/src/ATen/core/type.cpp @@ -67,7 +67,7 @@ std::ostream& operator<<(std::ostream & out, const Type & t) { bool has_valid_strides_info = ndim > 0 && value->strides().isComplete() && value->strides().size() == ndim; - out << "("; + out << '('; size_t i = 0; bool symbolic = type_verbosity() == TypeVerbosity::Symbolic; for (i = 0; i < *ndim; ++i) { @@ -79,7 +79,7 @@ std::ostream& operator<<(std::ostream & out, const Type & t) { } else if (symbolic) { out << value->symbolic_sizes().at(i); } else { - out << "*"; + out << '*'; } } if (has_valid_strides_info && @@ -91,7 +91,7 @@ std::ostream& operator<<(std::ostream & out, const Type & t) { } out << value->strides()[i].value(); } - out << "]"; + out << ']'; } if (type_verbosity() >= TypeVerbosity::Full) { if (value->requiresGrad()) { @@ -107,12 +107,12 @@ std::ostream& operator<<(std::ostream & out, const Type & t) { out << "device=" << *value->device(); } } - out << ")"; + out << ')'; } else { if (type_verbosity() >= TypeVerbosity::Full) { size_t i = 0; if (value->requiresGrad()) { - out 
<< "(" + out << '(' << "requires_grad=" << *value->requiresGrad(); i++; } @@ -120,7 +120,7 @@ std::ostream& operator<<(std::ostream & out, const Type & t) { out << ((i++ > 0) ? ", " : "(") << "device=" << *value->device(); } if (i > 0) { - out << ")"; + out << ')'; } } } @@ -133,18 +133,18 @@ std::ostream& operator<<(std::ostream & out, const Type & t) { out << *prim << "[]"; } else if (t.kind() == TypeKind::OptionalType) { auto prim = t.castRaw()->getElementType(); - out << *prim << "?"; + out << *prim << '?'; } else if(t.kind() == TypeKind::FutureType) { auto elem = t.castRaw()->getElementType(); - out << "Future[" << *elem << "]"; + out << "Future[" << *elem << ']'; } else if(t.kind() == TypeKind::RRefType) { auto elem = t.castRaw()->getElementType(); - out << "RRef[" << *elem << "]"; + out << "RRef[" << *elem << ']'; } else if(auto tup = t.cast()) { if (tup->schema()) { out << "NamedTuple"; } - out << "("; + out << '('; for(size_t i = 0; i < tup->elements().size(); ++i) { if(i > 0) out << ", "; @@ -160,7 +160,7 @@ std::ostream& operator<<(std::ostream & out, const Type & t) { out << *(tup->elements()[i]); } } - out << ")"; + out << ')'; } else if (t.kind() == TypeKind::FunctionType) { out << "Function"; } else { @@ -475,7 +475,7 @@ std::optional unifyTypeList( why_not << "Could not unify type list since element " << i << " of type " << elements.at(i)->repr_str() << " did not match the types before it (" - << ret_type->repr_str() << ")"; + << ret_type->repr_str() << ')'; return std::nullopt; } ret_type = *maybe_unified; @@ -907,13 +907,13 @@ std::string TupleType::str() const { // NOLINTNEXTLINE(bugprone-unchecked-optional-access) ss << name()->qualifiedName(); } else { - ss << "("; + ss << '('; for(size_t i = 0; i < elements().size(); ++i) { if(i > 0) ss << ", "; ss << elements()[i]->str(); } - ss << ")"; + ss << ')'; } return ss.str(); } @@ -1003,8 +1003,8 @@ bool InterfaceType::isSubTypeImpl( *why_not << "Method on interface '" << lhs.repr_str() << "' (1) is not compatible with interface '" << rhs.repr_str() << "' (2)\n" - << " (1) " << *self_schema << "\n" - << " (2) " << schema << "\n"; + << " (1) " << *self_schema << '\n' + << " (2) " << schema << '\n'; return false; } return false; @@ -1078,7 +1078,7 @@ SymbolicShape SymbolicShape::merge(const SymbolicShape& other) const { } void SymbolicShape::dump() const { - std::cout << *this << "\n"; + std::cout << *this << '\n'; } bool EnumType::isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const { diff --git a/aten/src/ATen/core/union_type.cpp b/aten/src/ATen/core/union_type.cpp index dc4cb788721..8731c2cbc49 100644 --- a/aten/src/ATen/core/union_type.cpp +++ b/aten/src/ATen/core/union_type.cpp @@ -205,9 +205,9 @@ UnionType::UnionType(std::vector reference, TypeKind kind) : SharedType for (const auto i : c10::irange(reference.size())) { msg << reference[i]->repr_str(); if (i > 0) { - msg << ","; + msg << ','; } - msg << " "; + msg << ' '; } msg << "} has the single type " << types_[0]->repr_str() << ". 
Use the common supertype instead of creating a Union" diff --git a/aten/src/ATen/cpu/vec/vec256/vec256.h b/aten/src/ATen/cpu/vec/vec256/vec256.h index 50c3cc31a6c..a2eb9e5f451 100644 --- a/aten/src/ATen/cpu/vec/vec256/vec256.h +++ b/aten/src/ATen/cpu/vec/vec256/vec256.h @@ -80,7 +80,7 @@ std::ostream& operator<<(std::ostream& stream, const Vectorized& vec) { } stream << buf[i]; } - stream << "]"; + stream << ']'; return stream; } diff --git a/aten/src/ATen/cpu/vec/vec512/vec512.h b/aten/src/ATen/cpu/vec/vec512/vec512.h index 975b71ce9a8..623971454df 100644 --- a/aten/src/ATen/cpu/vec/vec512/vec512.h +++ b/aten/src/ATen/cpu/vec/vec512/vec512.h @@ -55,7 +55,7 @@ std::ostream& operator<<(std::ostream& stream, const Vectorized& vec) { } stream << buf[i]; } - stream << "]"; + stream << ']'; return stream; } diff --git a/aten/src/ATen/cuda/detail/CUDAHooks.cpp b/aten/src/ATen/cuda/detail/CUDAHooks.cpp index 594045a1b41..b2b9be4498e 100644 --- a/aten/src/ATen/cuda/detail/CUDAHooks.cpp +++ b/aten/src/ATen/cuda/detail/CUDAHooks.cpp @@ -411,16 +411,16 @@ std::string CUDAHooks::showConfig() const { // HIP_VERSION value format was changed after ROCm v4.2 to include the patch number if(v < 500) { // If major=xx, minor=yy then format -> xxyy - oss << (v / 100) << "." << (v % 10); + oss << (v / 100) << '.' << (v % 10); } else { // If major=xx, minor=yy & patch=zzzzz then format -> xxyyzzzzz - oss << (v / 10000000) << "." << (v / 100000 % 100) << "." << (v % 100000); + oss << (v / 10000000) << '.' << (v / 100000 % 100) << '.' << (v % 100000); } #else - oss << (v / 1000) << "." << (v / 10 % 100); + oss << (v / 1000) << '.' << (v / 10 % 100); if (v % 10 != 0) { - oss << "." << (v % 10); + oss << '.' << (v % 10); } #endif }; @@ -431,16 +431,16 @@ std::string CUDAHooks::showConfig() const { oss << " - HIP Runtime "; #endif printCudaStyleVersion(runtimeVersion); - oss << "\n"; + oss << '\n'; // TODO: Make HIPIFY understand CUDART_VERSION macro #if !defined(USE_ROCM) if (runtimeVersion != CUDART_VERSION) { oss << " - Built with CUDA Runtime "; printCudaStyleVersion(CUDART_VERSION); - oss << "\n"; + oss << '\n'; } - oss << " - NVCC architecture flags: " << NVCC_FLAGS_EXTRA << "\n"; + oss << " - NVCC architecture flags: " << NVCC_FLAGS_EXTRA << '\n'; #endif #if !defined(USE_ROCM) @@ -448,9 +448,9 @@ std::string CUDAHooks::showConfig() const { auto printCudnnStyleVersion = [&](size_t v) { - oss << (v / 1000) << "." << (v / 100 % 10); + oss << (v / 1000) << '.' << (v / 100 % 10); if (v % 100 != 0) { - oss << "." << (v % 100); + oss << '.' << (v % 100); } }; @@ -461,22 +461,22 @@ std::string CUDAHooks::showConfig() const { if (cudnnCudartVersion != CUDART_VERSION) { oss << " (built against CUDA "; printCudaStyleVersion(cudnnCudartVersion); - oss << ")"; + oss << ')'; } - oss << "\n"; + oss << '\n'; if (cudnnVersion != CUDNN_VERSION) { oss << " - Built with CuDNN "; printCudnnStyleVersion(CUDNN_VERSION); - oss << "\n"; + oss << '\n'; } #endif #else // TODO: Check if miopen has the functions above and unify - oss << " - MIOpen " << MIOPEN_VERSION_MAJOR << "." << MIOPEN_VERSION_MINOR << "." << MIOPEN_VERSION_PATCH << "\n"; + oss << " - MIOpen " << MIOPEN_VERSION_MAJOR << '.' << MIOPEN_VERSION_MINOR << '.' << MIOPEN_VERSION_PATCH << '\n'; #endif #if AT_MAGMA_ENABLED() - oss << " - Magma " << MAGMA_VERSION_MAJOR << "." << MAGMA_VERSION_MINOR << "." << MAGMA_VERSION_MICRO << "\n"; + oss << " - Magma " << MAGMA_VERSION_MAJOR << '.' << MAGMA_VERSION_MINOR << '.' 
<< MAGMA_VERSION_MICRO << '\n'; #endif return oss.str(); diff --git a/aten/src/ATen/cuda/jiterator.cu b/aten/src/ATen/cuda/jiterator.cu index 3af5104288d..d664c828bda 100644 --- a/aten/src/ATen/cuda/jiterator.cu +++ b/aten/src/ATen/cuda/jiterator.cu @@ -42,7 +42,7 @@ static inline void launch_jitted_vectorized_kernel_dynamic( // The cache key includes all the parameters to generate_code + vec_size + dev_idx std::stringstream ss; - ss << nInputs << "_" << nOutputs << f; + ss << nInputs << '_' << nOutputs << f; ss << f_inputs_type_str << compute_type_str << result_type_str; ss << static_cast(at::cuda::jit::BinaryFuncVariant::NoScalar); ss << extra_args_types; @@ -144,7 +144,7 @@ static inline void launch_jitted_unrolled_kernel_dynamic( // The cache key includes all the parameters to generate_code + dev_idx std::stringstream ss; - ss << nInputs << "_" << nOutputs << f; + ss << nInputs << '_' << nOutputs << f; ss << f_inputs_type_str << compute_type_str << result_type_str; ss << contiguous << dynamic_casting; ss << static_cast(at::cuda::jit::BinaryFuncVariant::NoScalar); diff --git a/aten/src/ATen/cuda/tunable/Tunable.cpp b/aten/src/ATen/cuda/tunable/Tunable.cpp index 9fb04b40d30..eb7e381d277 100644 --- a/aten/src/ATen/cuda/tunable/Tunable.cpp +++ b/aten/src/ATen/cuda/tunable/Tunable.cpp @@ -52,10 +52,10 @@ TuningContext* getTuningContext() { std::ostream& operator<<(std::ostream& stream, const ResultEntry& entry) { static const bool blaslog = c10::utils::get_env("PYTORCH_TUNABLEOP_BLAS_LOG") == "1"; if (!blaslog) { - return stream << entry.key_ << "," << entry.time_; + return stream << entry.key_ << ',' << entry.time_; } else { - return stream << entry.key_ << "," << entry.time_ << ",BLAS_PARAMS: " << entry.blas_sig_; + return stream << entry.key_ << ',' << entry.time_ << ",BLAS_PARAMS: " << entry.blas_sig_; } } @@ -156,10 +156,10 @@ void TuningResultsManager::RecordUntuned( std::ofstream& untuned_file, const std if (isNew) { static const bool blaslog = c10::utils::get_env("PYTORCH_TUNABLEOP_BLAS_LOG") == "1"; if (!blaslog) { - untuned_file << op_signature << "," << params_signature << std::endl; + untuned_file << op_signature << ',' << params_signature << std::endl; } else { - untuned_file << op_signature << "," << params_signature << ",BLAS_PARAMS: " << blas_signature << std::endl; + untuned_file << op_signature << ',' << params_signature << ",BLAS_PARAMS: " << blas_signature << std::endl; } TUNABLE_LOG3("Untuned,", op_signature, ",", params_signature); } @@ -201,7 +201,7 @@ void TuningResultsManager::InitRealtimeAppend(const std::string& filename, const if(!file_exists || file_empty) { for(const auto& [key, val] : validators) { - (*realtime_out_) << "Validator," << key << "," << val << std::endl; + (*realtime_out_) << "Validator," << key << ',' << val << std::endl; realtime_out_->flush(); } validators_written_ = true; @@ -219,7 +219,7 @@ void TuningResultsManager::AppendResultLine(const std::string& op_sig, const std return; } - (*realtime_out_) << op_sig << "," << param_sig << "," << result << std::endl; + (*realtime_out_) << op_sig << ',' << param_sig << ',' << result << std::endl; realtime_out_->flush(); //ensure immediate write to disk TUNABLE_LOG3("Realtime append: ", op_sig, "(", param_sig, ") -> ", result); diff --git a/aten/src/ATen/cudnn/Descriptors.cpp b/aten/src/ATen/cudnn/Descriptors.cpp index 8636d267209..a2cb0cb0a10 100644 --- a/aten/src/ATen/cudnn/Descriptors.cpp +++ b/aten/src/ATen/cudnn/Descriptors.cpp @@ -93,31 +93,31 @@ std::string cudnnTypeToString(cudnnDataType_t 
dtype) { return "CUDNN_DATA_UINT8x4"; default: std::ostringstream oss; - oss << "(unknown data-type " << static_cast(dtype) << ")"; + oss << "(unknown data-type " << static_cast(dtype) << ')'; return oss.str(); } } std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d) { - out << "TensorDescriptor " << static_cast(d.desc()) << "\n"; + out << "TensorDescriptor " << static_cast(d.desc()) << '\n'; int nbDims = 0; int dimA[CUDNN_DIM_MAX]; int strideA[CUDNN_DIM_MAX]; cudnnDataType_t dtype{}; cudnnGetTensorNdDescriptor(d.desc(), CUDNN_DIM_MAX, &dtype, &nbDims, dimA, strideA); - out << " type = " << cudnnTypeToString(dtype) << "\n"; - out << " nbDims = " << nbDims << "\n"; + out << " type = " << cudnnTypeToString(dtype) << '\n'; + out << " nbDims = " << nbDims << '\n'; // Read out only nbDims of the arrays! out << " dimA = "; for (auto i : ArrayRef{dimA, static_cast(nbDims)}) { out << i << ", "; } - out << "\n"; + out << '\n'; out << " strideA = "; for (auto i : ArrayRef{strideA, static_cast(nbDims)}) { out << i << ", "; } - out << "\n"; + out << '\n'; return out; } @@ -168,27 +168,27 @@ std::string cudnnMemoryFormatToString(cudnnTensorFormat_t tformat) { return "CUDNN_TENSOR_NHWC"; default: std::ostringstream oss; - oss << "(unknown cudnn tensor format " << static_cast(tformat) << ")"; + oss << "(unknown cudnn tensor format " << static_cast(tformat) << ')'; return oss.str(); } } std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d) { - out << "FilterDescriptor " << static_cast(d.desc()) << "\n"; + out << "FilterDescriptor " << static_cast(d.desc()) << '\n'; int nbDims = 0; int dimA[CUDNN_DIM_MAX]; cudnnDataType_t dtype{}; cudnnTensorFormat_t tformat{}; cudnnGetFilterNdDescriptor(d.desc(), CUDNN_DIM_MAX, &dtype, &tformat, &nbDims, dimA); - out << " type = " << cudnnTypeToString(dtype) << "\n"; - out << " tensor_format = " << cudnnMemoryFormatToString(tformat) << "\n"; - out << " nbDims = " << nbDims << "\n"; + out << " type = " << cudnnTypeToString(dtype) << '\n'; + out << " tensor_format = " << cudnnMemoryFormatToString(tformat) << '\n'; + out << " nbDims = " << nbDims << '\n'; // Read out only nbDims of the arrays! 
out << " dimA = "; for (auto i : ArrayRef{dimA, static_cast(nbDims)}) { out << i << ", "; } - out << "\n"; + out << '\n'; return out; } diff --git a/aten/src/ATen/functorch/DynamicLayer.cpp b/aten/src/ATen/functorch/DynamicLayer.cpp index 69af08a7bd7..518098a8b4a 100644 --- a/aten/src/ATen/functorch/DynamicLayer.cpp +++ b/aten/src/ATen/functorch/DynamicLayer.cpp @@ -346,15 +346,15 @@ void foreachTensorInplaceWithFlag(std::vector& args, int64_t begin, int6 } std::ostream& operator<< (std::ostream& os, const DynamicLayer& layer) { - os << layer.layerId() << ":" << layer.key(); + os << layer.layerId() << ':' << layer.key(); return os; } std::ostream& operator<< (std::ostream& os, const std::vector& dls) { os << "DynamicLayerStack[ "; for (const auto& layer : dls) { - os << layer << " "; + os << layer << ' '; } - os << "]"; + os << ']'; return os; } diff --git a/aten/src/ATen/functorch/TensorWrapper.cpp b/aten/src/ATen/functorch/TensorWrapper.cpp index 65de9268927..ba5dcfc9238 100644 --- a/aten/src/ATen/functorch/TensorWrapper.cpp +++ b/aten/src/ATen/functorch/TensorWrapper.cpp @@ -22,7 +22,7 @@ void dumpTensor(std::ostream& ss, const Tensor& tensor) { if (batched) { ss << "Batched[lvl=" << batched->level() << " dim=" << batched->bdim() << ", "; dumpTensor(ss, batched->value()); - ss << "]"; + ss << ']'; return; } ss << "Tensor" << tensor.sizes(); @@ -36,7 +36,7 @@ void dumpTensor(std::ostream& ss, const Tensor& tensor) { ss << "dead, "; } dumpTensor(ss, wrapped->value()); - ss << "]"; + ss << ']'; } void TensorWrapper::refreshMetadata() { diff --git a/aten/src/ATen/miopen/Descriptors.cpp b/aten/src/ATen/miopen/Descriptors.cpp index 86e42ee3b66..3fe27c7a082 100644 --- a/aten/src/ATen/miopen/Descriptors.cpp +++ b/aten/src/ATen/miopen/Descriptors.cpp @@ -73,32 +73,32 @@ std::string miopenTypeToString(miopenDataType_t dtype) { return "miopenBFloat16"; default: std::ostringstream oss; - oss << "(unknown data-type " << static_cast(dtype) << ")"; + oss << "(unknown data-type " << static_cast(dtype) << ')'; return oss.str(); } } std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d) { - out << "TensorDescriptor " << static_cast(d.desc()) << "\n"; + out << "TensorDescriptor " << static_cast(d.desc()) << '\n'; int nbDims = 0; int dimA[MIOPEN_DIM_MAX]; int strideA[MIOPEN_DIM_MAX]; miopenDataType_t dtype; miopenGetTensorDescriptorSize(d.desc(), &nbDims); miopenGetTensorDescriptor(d.desc(), &dtype, dimA, strideA); - out << " type = " << miopenTypeToString(dtype) << "\n"; - out << " nbDims = " << nbDims << "\n"; + out << " type = " << miopenTypeToString(dtype) << '\n'; + out << " nbDims = " << nbDims << '\n'; // Read out only nbDims of the arrays! 
out << " dimA = "; for (auto i : ArrayRef{dimA, static_cast(nbDims)}) { out << i << ", "; } - out << "\n"; + out << '\n'; out << " strideA = "; for (auto i : ArrayRef{strideA, static_cast(nbDims)}) { out << i << ", "; } - out << "\n"; + out << '\n'; return out; } diff --git a/aten/src/ATen/mps/MPSProfiler.h b/aten/src/ATen/mps/MPSProfiler.h index c1cb9090fc4..187e86d92e1 100644 --- a/aten/src/ATen/mps/MPSProfiler.h +++ b/aten/src/ATen/mps/MPSProfiler.h @@ -91,7 +91,7 @@ struct OperationInfo : BaseInfo { std::stringstream kernelStr; kernelStr << kernelName; for (const Tensor& tensor : tensors) { - kernelStr << ":" << BaseInfo::buildTensorString(tensor, includeBufferId); + kernelStr << ':' << BaseInfo::buildTensorString(tensor, includeBufferId); } return kernelStr.str(); } diff --git a/aten/src/ATen/mps/MPSProfiler.mm b/aten/src/ATen/mps/MPSProfiler.mm index a91574c56c5..1d0408b8089 100644 --- a/aten/src/ATen/mps/MPSProfiler.mm +++ b/aten/src/ATen/mps/MPSProfiler.mm @@ -39,9 +39,9 @@ std::string BaseInfo::buildTensorString(const Tensor& tensor, bool includeBuffer // see comments for INCLUDE_BUFFER_ID if (includeBufferId && deviceType == at::kMPS) { id buffer = __builtin_bit_cast(id, tensor.storage().data()); - tensorStr << "(buf#" << (getIMPSAllocator()->getBufferId(buffer)) << ":" << buffer.retainCount << ")"; + tensorStr << "(buf#" << (getIMPSAllocator()->getBufferId(buffer)) << ':' << buffer.retainCount << ')'; } - tensorStr << ":" << tensor.scalar_type() << tensor.sizes(); + tensorStr << ':' << tensor.scalar_type() << tensor.sizes(); return tensorStr.str(); } else { return "undefined"; diff --git a/aten/src/ATen/native/ConvUtils.h b/aten/src/ATen/native/ConvUtils.h index 892144ac663..2a3388a0526 100644 --- a/aten/src/ATen/native/ConvUtils.h +++ b/aten/src/ATen/native/ConvUtils.h @@ -167,7 +167,7 @@ static void check_args(CheckedFrom c, IntArrayRef args, size_t expected_size, co std::stringstream ss; ss << arg_name << " should be greater than zero but got ("; std::copy(args.begin(), args.end() - 1, std::ostream_iterator(ss,", ")); - ss << args.back() << ")" << " (while checking arguments for " << c << ")"; + ss << args.back() << ")" << " (while checking arguments for " << c << ')'; TORCH_CHECK(false, ss.str()); } } diff --git a/aten/src/ATen/native/Convolution.cpp b/aten/src/ATen/native/Convolution.cpp index ca3a4f5f3fa..cb37f6f1030 100644 --- a/aten/src/ATen/native/Convolution.cpp +++ b/aten/src/ATen/native/Convolution.cpp @@ -639,7 +639,7 @@ static std::ostream& operator<<(std::ostream & out, const ConvParams& params) << " deterministic = " << params.deterministic << " cudnn_enabled = " << params.cudnn_enabled << " allow_tf32 = " << params.allow_tf32 - << "}"; + << '}'; return out; } diff --git a/aten/src/ATen/native/SpectralOps.cpp b/aten/src/ATen/native/SpectralOps.cpp index 79aaac48034..975e237c468 100644 --- a/aten/src/ATen/native/SpectralOps.cpp +++ b/aten/src/ATen/native/SpectralOps.cpp @@ -847,7 +847,7 @@ Tensor stft(const Tensor& self, const int64_t n_fft, const std::optional( // stride_output_h + group_count); - // std::cout << "PTRS " << mat_a.data_ptr() << " " << mat_b.data_ptr() << " + // std::cout << "PTRS " << mat_a.data_ptr() << ' ' << mat_b.data_ptr() << " // " - // << out.data_ptr() << " " << scale_a.data_ptr() << " " + // << out.data_ptr() << ' ' << scale_a.data_ptr() << ' ' // << scale_b.data_ptr() << "\n"; // for (int i = 0; i < group_count; i++) { // std::cout << "A " << (void*)inputA_ptrs_h[i] << "\n"; diff --git a/aten/src/ATen/native/cuda/jit_utils.cpp 
b/aten/src/ATen/native/cuda/jit_utils.cpp index 09c8e74d4b2..e65fa4ceb38 100644 --- a/aten/src/ATen/native/cuda/jit_utils.cpp +++ b/aten/src/ATen/native/cuda/jit_utils.cpp @@ -1057,14 +1057,14 @@ std::string generate_code( // TODO these arrays are potentially of the different types, use function // traits to determine the types declare_load_arrays << f_inputs_type << " arg" << std::to_string(i) - << "[" << std::to_string(thread_work_size) << "];\n"; + << '[' << std::to_string(thread_work_size) << "];\n"; } env.s("declare_load_arrays", declare_load_arrays.str()); std::stringstream declare_store_arrays; for (int i = 0; i < nOutputs; i++) { declare_store_arrays << result_type << " out" << std::to_string(i) - << "[" << std::to_string(thread_work_size) << "];\n"; + << '[' << std::to_string(thread_work_size) << "];\n"; } env.s("declare_store_arrays", declare_store_arrays.str()); @@ -1217,7 +1217,7 @@ std::string generate_code( for (const auto i : c10::irange(nInputs)){ auto i_string = std::to_string(i); vector_inputs << "auto * input" << i_string << - " = reinterpret_cast(data[" << i_string << "+" << nOutputs << "])" << + " = reinterpret_cast(data[" << i_string << '+' << nOutputs << "])" << " + block_work_size * idx;\n"; } env.s("vector_inputs", vector_inputs.str()); @@ -1543,17 +1543,17 @@ NvrtcFunction jit_pwise_function( // Constructs file path by appending constructed cubin name to cache path std::stringstream ss; - ss << *cache_dir << "/"; + ss << *cache_dir << '/'; ss << kernel_name; #ifdef USE_ROCM ss << "_arch" << prop->gcnArchName; #else - ss << "_arch" << cuda_major << "." << cuda_minor; + ss << "_arch" << cuda_major << '.' << cuda_minor; #endif - ss << "_nvrtc" << nvrtc_major << "." << nvrtc_minor; + ss << "_nvrtc" << nvrtc_major << '.' << nvrtc_minor; ss << (compile_to_sass ? "_sass" : "_ptx"); - ss << "_" << code.length(); - ss << "_" << hash_code; + ss << '_' << code.length(); + ss << '_' << hash_code; file_path = ss.str(); std::ifstream readin{file_path, std::ios::in | std::ifstream::binary}; diff --git a/aten/src/ATen/native/cudnn/ConvShared.cpp b/aten/src/ATen/native/cudnn/ConvShared.cpp index 325b082f314..1584d5e9acd 100644 --- a/aten/src/ATen/native/cudnn/ConvShared.cpp +++ b/aten/src/ATen/native/cudnn/ConvShared.cpp @@ -82,15 +82,15 @@ namespace native { std::ostream& operator<<(std::ostream& out, const ConvolutionParams& params) { out << "ConvolutionParams \n" - << " memory_format = " << params.memory_format << "\n" - << " data_type = " << cudnnTypeToString(params.dataType) << "\n" - << " padding = " << ArrayRef{params.padding} << "\n" - << " stride = " << ArrayRef{params.stride} << "\n" - << " dilation = " << ArrayRef{params.dilation} << "\n" - << " groups = " << params.groups << "\n" + << " memory_format = " << params.memory_format << '\n' + << " data_type = " << cudnnTypeToString(params.dataType) << '\n' + << " padding = " << ArrayRef{params.padding} << '\n' + << " stride = " << ArrayRef{params.stride} << '\n' + << " dilation = " << ArrayRef{params.dilation} << '\n' + << " groups = " << params.groups << '\n' << " deterministic = " << (params.deterministic ? "true" : "false") - << "\n" - << " allow_tf32 = " << (params.allow_tf32 ? "true" : "false") << "\n"; + << '\n' + << " allow_tf32 = " << (params.allow_tf32 ? 
"true" : "false") << '\n'; return out; } @@ -173,16 +173,16 @@ std::string repro_from_args(const ConvolutionParams& params) { at::globalContext().float32Precision( at::Float32Backend::CUDA, at::Float32Op::MATMUL) == at::Float32Precision::TF32) - << "\n"; + << '\n'; ss << "torch.backends.cudnn.benchmark = " - << pybool(at::globalContext().benchmarkCuDNN()) << "\n"; + << pybool(at::globalContext().benchmarkCuDNN()) << '\n'; ss << "torch.backends.cudnn.deterministic = " << pybool(params.deterministic) - << "\n"; + << '\n'; ss << "torch.backends.cudnn.allow_tf32 = " << pybool(params.allow_tf32) - << "\n"; + << '\n'; ss << "data = torch.randn(" << ArrayRef(params.input_size, dim) << ", dtype=" << full_dtype << ", "; - ss << "device='cuda', requires_grad=True)" << to_channels_last << "\n"; + ss << "device='cuda', requires_grad=True)" << to_channels_last << '\n'; ss << "net = torch.nn.Conv" << dim - 2 << "d(" << in_channels << ", " << out_channels << ", "; ss << "kernel_size=" << ArrayRef(¶ms.weight_size[2], dim - 2) @@ -192,7 +192,7 @@ std::string repro_from_args(const ConvolutionParams& params) { ss << "dilation=" << ArrayRef(params.dilation, dim - 2) << ", "; ss << "groups=" << params.groups << ")\n"; ss << "net = net.cuda()." << partial_dtype << "()" << to_channels_last - << "\n"; + << '\n'; ss << "out = net(data)\n"; ss << "out.backward(torch.randn_like(out))\n"; ss << "torch.cuda.synchronize()\n\n"; diff --git a/aten/src/ATen/native/cudnn/Conv_v7.cpp b/aten/src/ATen/native/cudnn/Conv_v7.cpp index bc064e3ad31..d5102910c64 100644 --- a/aten/src/ATen/native/cudnn/Conv_v7.cpp +++ b/aten/src/ATen/native/cudnn/Conv_v7.cpp @@ -93,11 +93,10 @@ std::ostream& operator<<(std::ostream& out, const ConvolutionArgs& args) { << "input: " << args.idesc // already has a trailing newline << "output: " << args.odesc // already has a trailing newline << "weight: " << args.wdesc // already has a trailing newline - << "Pointer addresses: " - << "\n" - << " input: " << args.input.const_data_ptr() << "\n" - << " output: " << args.output.const_data_ptr() << "\n" - << " weight: " << args.weight.const_data_ptr() << "\n"; + << "Pointer addresses: " << '\n' + << " input: " << args.input.const_data_ptr() << '\n' + << " output: " << args.output.const_data_ptr() << '\n' + << " weight: " << args.weight.const_data_ptr() << '\n'; return out; } diff --git a/aten/src/ATen/native/metal/MetalTensorImplStorage.mm b/aten/src/ATen/native/metal/MetalTensorImplStorage.mm index f614429eefd..20a942a9e25 100644 --- a/aten/src/ATen/native/metal/MetalTensorImplStorage.mm +++ b/aten/src/ATen/native/metal/MetalTensorImplStorage.mm @@ -115,7 +115,7 @@ std::ostream& operator<<( std::copy( strides.begin(), strides.end() - 1, std::ostream_iterator(oss, ",")); oss << sizes.back(); - output << oss.str() << "}"; + output << oss.str() << '}'; return output; } diff --git a/aten/src/ATen/native/mkldnn/xpu/Conv.cpp b/aten/src/ATen/native/mkldnn/xpu/Conv.cpp index 1555eed558e..6827e02cc3f 100644 --- a/aten/src/ATen/native/mkldnn/xpu/Conv.cpp +++ b/aten/src/ATen/native/mkldnn/xpu/Conv.cpp @@ -53,7 +53,7 @@ std::ostream& operator<<(std::ostream& out, const ConvParams& params) { << " transposed = " << params.transposed << " output_padding = " << IntArrayRef{params.output_padding} << " groups = " << params.groups << " benchmark = " << params.benchmark - << " deterministic = " << params.deterministic << "}"; + << " deterministic = " << params.deterministic << '}'; return out; } diff --git 
a/aten/src/ATen/native/quantized/cpu/qnnpack/test/avgpool-microkernel-tester.h b/aten/src/ATen/native/quantized/cpu/qnnpack/test/avgpool-microkernel-tester.h index 1a425146ad6..ac6370f8df2 100644 --- a/aten/src/ATen/native/quantized/cpu/qnnpack/test/avgpool-microkernel-tester.h +++ b/aten/src/ATen/native/quantized/cpu/qnnpack/test/avgpool-microkernel-tester.h @@ -301,12 +301,12 @@ class AvgPoolMicrokernelTester { ASSERT_NEAR( float(int32_t(y[i * yStride() + k])), yFP[i * kc() + k], 0.5001f) << "at pixel " << i << ", channel " << k << ", n = " << n() - << ", ks = " << kh() << "x" << kw() << " (" << ks() + << ", ks = " << kh() << 'x' << kw() << " (" << ks() << "), kc = " << kc() << ", acc = " << yAcc[i * kc() + k]; ASSERT_EQ( uint32_t(yRef[i * kc() + k]), uint32_t(y[i * yStride() + k])) << "at pixel " << i << ", channel " << k << ", n = " << n() - << ", ks = " << kh() << "x" << kw() << " (" << ks() + << ", ks = " << kh() << 'x' << kw() << " (" << ks() << "), kc = " << kc() << ", acc = " << yAcc[i * kc() + k]; } } @@ -396,12 +396,12 @@ class AvgPoolMicrokernelTester { ASSERT_NEAR( float(int32_t(y[i * yStride() + k])), yFP[i * kc() + k], 0.5001f) << "at pixel " << i << ", channel " << k << ", n = " << n() - << ", ks = " << kh() << "x" << kw() << " (" << ks() + << ", ks = " << kh() << 'x' << kw() << " (" << ks() << "), kc = " << kc() << ", acc = " << yAcc[i * kc() + k]; ASSERT_EQ( uint32_t(yRef[i * kc() + k]), uint32_t(y[i * yStride() + k])) << "at pixel " << i << ", channel " << k << ", n = " << n() - << ", ks = " << kh() << "x" << kw() << " (" << ks() + << ", ks = " << kh() << 'x' << kw() << " (" << ks() << "), kc = " << kc() << ", acc = " << yAcc[i * kc() + k]; } } diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack/test/maxpool-microkernel-tester.h b/aten/src/ATen/native/quantized/cpu/qnnpack/test/maxpool-microkernel-tester.h index e1583a2c058..fc94f9666d9 100644 --- a/aten/src/ATen/native/quantized/cpu/qnnpack/test/maxpool-microkernel-tester.h +++ b/aten/src/ATen/native/quantized/cpu/qnnpack/test/maxpool-microkernel-tester.h @@ -232,7 +232,7 @@ class MaxPoolMicrokernelTester { ASSERT_EQ( uint32_t(yRef[i * kc() + k]), uint32_t(y[i * yStride() + k])) << "at pixel " << i << ", channel " << k << ", n = " << n() - << ", ks = " << kh() << "x" << kw() << " (" << ks() + << ", ks = " << kh() << 'x' << kw() << " (" << ks() << "), kc = " << kc(); } } diff --git a/aten/src/ATen/native/utils/ParamUtils.h b/aten/src/ATen/native/utils/ParamUtils.h index c9088c03d81..8887664df1c 100644 --- a/aten/src/ATen/native/utils/ParamUtils.h +++ b/aten/src/ATen/native/utils/ParamUtils.h @@ -17,7 +17,7 @@ inline std::vector _expand_param_if_needed( std::ostringstream ss; ss << "expected " << param_name << " to be a single integer value or a " << "list of " << expected_dim << " values to match the convolution " - << "dimensions, but got " << param_name << "=" << list_param; + << "dimensions, but got " << param_name << '=' << list_param; TORCH_CHECK(false, ss.str()); } else { return list_param.vec(); diff --git a/aten/src/ATen/native/vulkan/api/Adapter.cpp b/aten/src/ATen/native/vulkan/api/Adapter.cpp index 173479a0c2d..350df39ea36 100644 --- a/aten/src/ATen/native/vulkan/api/Adapter.cpp +++ b/aten/src/ATen/native/vulkan/api/Adapter.cpp @@ -358,9 +358,9 @@ std::string Adapter::stringize() const { std::string device_type = get_device_type_str(properties.deviceType); VkPhysicalDeviceLimits limits = properties.limits; - ss << "{" << std::endl; + ss << '{' << std::endl; ss << " Physical Device Info {" << std::endl; - 
ss << " apiVersion: " << v_major << "." << v_minor << std::endl; + ss << " apiVersion: " << v_major << '.' << v_minor << std::endl; ss << " driverversion: " << properties.driverVersion << std::endl; ss << " deviceType: " << device_type << std::endl; ss << " deviceName: " << properties.deviceName << std::endl; @@ -371,7 +371,7 @@ std::string Adapter::stringize() const { #define PRINT_LIMIT_PROP_VEC3(name) \ ss << " " << std::left << std::setw(36) << #name << limits.name[0] \ - << "," << limits.name[1] << "," << limits.name[2] << std::endl; + << ',' << limits.name[1] << ',' << limits.name[2] << std::endl; ss << " Physical Device Limits {" << std::endl; PRINT_LIMIT_PROP(maxImageDimension1D); @@ -425,7 +425,7 @@ std::string Adapter::stringize() const { ; } ss << " ]" << std::endl; - ss << "}"; + ss << '}'; return ss.str(); } diff --git a/aten/src/ATen/native/vulkan/api/Exception.cpp b/aten/src/ATen/native/vulkan/api/Exception.cpp index 9b8b653e061..436b38cbba6 100644 --- a/aten/src/ATen/native/vulkan/api/Exception.cpp +++ b/aten/src/ATen/native/vulkan/api/Exception.cpp @@ -33,7 +33,7 @@ std::ostream& operator<<(std::ostream& out, const VkResult result) { VK_RESULT_CASE(VK_ERROR_FORMAT_NOT_SUPPORTED) VK_RESULT_CASE(VK_ERROR_FRAGMENTED_POOL) default: - out << "VK_ERROR_UNKNOWN (VkResult " << result << ")"; + out << "VK_ERROR_UNKNOWN (VkResult " << result << ')'; break; } return out; @@ -46,7 +46,7 @@ std::ostream& operator<<(std::ostream& out, const VkResult result) { // std::ostream& operator<<(std::ostream& out, const SourceLocation& loc) { - out << loc.function << " at " << loc.file << ":" << loc.line; + out << loc.function << " at " << loc.file << ':' << loc.line; return out; } @@ -66,7 +66,7 @@ Error::Error(SourceLocation source_location, const char* cond, std::string msg) : msg_(std::move(msg)), source_location_{source_location} { std::ostringstream oss; oss << "Exception raised from " << source_location_ << ": "; - oss << "(" << cond << ") is false! "; + oss << '(' << cond << ") is false! "; oss << msg_; what_ = oss.str(); } diff --git a/aten/src/ATen/native/vulkan/api/QueryPool.cpp b/aten/src/ATen/native/vulkan/api/QueryPool.cpp index bfa92357dae..63c163aa44a 100644 --- a/aten/src/ATen/native/vulkan/api/QueryPool.cpp +++ b/aten/src/ATen/native/vulkan/api/QueryPool.cpp @@ -173,8 +173,8 @@ void QueryPool::extract_results() { static std::string stringize(const VkExtent3D& extents) { std::stringstream ss; - ss << "{" << extents.width << ", " << extents.height << ", " << extents.depth - << "}"; + ss << '{' << extents.width << ", " << extents.height << ", " << extents.depth + << '}'; return ss.str(); } diff --git a/aten/src/ATen/native/vulkan/api/Runtime.cpp b/aten/src/ATen/native/vulkan/api/Runtime.cpp index cf8402e40a0..a7485b706c5 100644 --- a/aten/src/ATen/native/vulkan/api/Runtime.cpp +++ b/aten/src/ATen/native/vulkan/api/Runtime.cpp @@ -149,7 +149,7 @@ VKAPI_ATTR VkBool32 VKAPI_CALL debug_report_callback_fn( (void)flags; std::stringstream stream; - stream << layer_prefix << " " << message_code << " " << message << std::endl; + stream << layer_prefix << ' ' << message_code << ' ' << message << std::endl; const std::string log = stream.str(); std::cout << log; diff --git a/aten/src/ATen/native/vulkan/api/Utils.h b/aten/src/ATen/native/vulkan/api/Utils.h index 3172c9c4610..8cd6a74c1c4 100644 --- a/aten/src/ATen/native/vulkan/api/Utils.h +++ b/aten/src/ATen/native/vulkan/api/Utils.h @@ -253,7 +253,7 @@ using vec4 = vec<4u>; // uvec3 is the type representing tensor extents. 
Useful for debugging. inline std::ostream& operator<<(std::ostream& os, const uvec3& v) { - os << "(" << v.data[0u] << ", " << v.data[1u] << ", " << v.data[2u] << ")"; + os << '(' << v.data[0u] << ", " << v.data[1u] << ", " << v.data[2u] << ')'; return os; } diff --git a/aten/src/ATen/test/basic.cpp b/aten/src/ATen/test/basic.cpp index 0937de45528..33fe4121a04 100644 --- a/aten/src/ATen/test/basic.cpp +++ b/aten/src/ATen/test/basic.cpp @@ -246,7 +246,7 @@ void TestToCFloat() { void TestToString() { Tensor b = ones({3, 7}) * .0000001f; std::stringstream s; - s << b << "\n"; + s << b << '\n'; std::string expect = "1e-07 *"; ASSERT_EQ_RESOLVED(s.str().substr(0, expect.size()), expect); } diff --git a/aten/src/ATen/test/scalar_test.cpp b/aten/src/ATen/test/scalar_test.cpp index 0d7b62b44d2..a22fb0d16ad 100644 --- a/aten/src/ATen/test/scalar_test.cpp +++ b/aten/src/ATen/test/scalar_test.cpp @@ -33,7 +33,7 @@ struct Foo { static void apply(Tensor a, Tensor b) { scalar_type s = 1; std::stringstream ss; - ss << "hello, dispatch: " << a.toString() << s << "\n"; + ss << "hello, dispatch: " << a.toString() << s << '\n'; auto data = (scalar_type*)a.data_ptr(); (void)data; } @@ -73,8 +73,8 @@ TEST(TestScalar, TestScalar) { Scalar bar = 3.0; Half h = bar.toHalf(); Scalar h2 = h; - cout << "H2: " << h2.toDouble() << " " << what.toFloat() << " " - << bar.toDouble() << " " << what.isIntegral(false) << "\n"; + cout << "H2: " << h2.toDouble() << ' ' << what.toFloat() << ' ' + << bar.toDouble() << ' ' << what.isIntegral(false) << '\n'; auto gen = at::detail::getDefaultCPUGenerator(); { // See Note [Acquire lock when using random generators] @@ -84,7 +84,7 @@ TEST(TestScalar, TestScalar) { } if (at::hasCUDA()) { auto t2 = zeros({4, 4}, at::kCUDA); - cout << &t2 << "\n"; + cout << &t2 << '\n'; } auto t = ones({4, 4}); @@ -129,7 +129,7 @@ TEST(TestScalar, TestScalar) { std::stringstream ss; // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) ASSERT_NO_THROW( - ss << "hello, dispatch" << x.toString() << s << "\n"); + ss << "hello, dispatch" << x.toString() << s << '\n'); auto data = (scalar_t*)x.data_ptr(); (void)data; }); diff --git a/aten/src/ATen/test/test_install/main.cpp b/aten/src/ATen/test/test_install/main.cpp index e9a03d2303a..3a57e0c6212 100644 --- a/aten/src/ATen/test/test_install/main.cpp +++ b/aten/src/ATen/test/test_install/main.cpp @@ -1,5 +1,5 @@ #include int main() { - std::cout << at::ones({3,4}, at::CPU(at::kFloat)) << "\n"; + std::cout << at::ones({3,4}, at::CPU(at::kFloat)) << '\n'; } diff --git a/aten/src/ATen/test/vec_test_all_types.cpp b/aten/src/ATen/test/vec_test_all_types.cpp index da0da761095..c0c05c14841 100644 --- a/aten/src/ATen/test/vec_test_all_types.cpp +++ b/aten/src/ATen/test/vec_test_all_types.cpp @@ -1828,9 +1828,9 @@ namespace { #endif EXPECT_EQ(u16, c10::detail::fp16_ieee_from_fp32_value(f32s[i])) - << "Test failed for float to uint16 " << f32s[i] << "\n"; + << "Test failed for float to uint16 " << f32s[i] << '\n'; EXPECT_EQ(x, c10::detail::fp16_ieee_to_fp32_value(u16)) - << "Test failed for uint16 to float " << u16 << "\n"; + << "Test failed for uint16 to float " << u16 << '\n'; } } TEST(FP8E4M3Test, FP8E4M3ConversionFloat) { @@ -1848,10 +1848,10 @@ namespace { EXPECT_TRUE(std::isnan(f32)); } else { EXPECT_EQ(f32, c10::detail::fp8e4m3fn_to_fp32_value(input)) - << "Test failed for u8 to float " << input << "\n"; + << "Test failed for u8 to float " << input << '\n'; } EXPECT_EQ(u8, c10::detail::fp8e4m3fn_from_fp32_value(f32)) - << "Test failed for float to u8 " 
<< f32 << "\n"; + << "Test failed for float to u8 " << f32 << '\n'; } } TEST(FP8E4M3Test, FP8E4M3BinaryAdd) { @@ -2015,10 +2015,10 @@ namespace { EXPECT_TRUE(std::isnan(f32)); } else { EXPECT_EQ(f32, c10::detail::fp8e5m2_to_fp32_value(input)) - << "Test failed for u8 to float " << input << "\n"; + << "Test failed for u8 to float " << input << '\n'; } EXPECT_EQ(u8, c10::detail::fp8e5m2_from_fp32_value(f32)) - << "Test failed for float to u8 " << f32 << "\n"; + << "Test failed for float to u8 " << f32 << '\n'; } } TEST(FP8E5M2Test, FP8E5M2BinaryAdd) { diff --git a/aten/src/ATen/test/vitals.cpp b/aten/src/ATen/test/vitals.cpp index cc93775bb53..eaf1cc152bc 100644 --- a/aten/src/ATen/test/vitals.cpp +++ b/aten/src/ATen/test/vitals.cpp @@ -19,7 +19,7 @@ TEST(Vitals, Basic) { c10::utils::set_env("TORCH_VITAL", "1"); TORCH_VITAL_DEFINE(Testing); TORCH_VITAL(Testing, Attribute0) << 1; - TORCH_VITAL(Testing, Attribute1) << "1"; + TORCH_VITAL(Testing, Attribute1) << '1'; TORCH_VITAL(Testing, Attribute2) << 1.0f; TORCH_VITAL(Testing, Attribute3) << 1.0; auto t = at::ones({1, 1}); diff --git a/aten/src/ATen/test/vulkan_api_test.cpp b/aten/src/ATen/test/vulkan_api_test.cpp index 396ea59d2f0..29f01fbd78c 100644 --- a/aten/src/ATen/test/vulkan_api_test.cpp +++ b/aten/src/ATen/test/vulkan_api_test.cpp @@ -129,14 +129,14 @@ void showRtol(const at::Tensor& a, const at::Tensor& b) { std::cout << "Max Diff allowed: " << maxDiff << std::endl; if (diff.sizes().size() == 2) { for (const auto y : c10::irange(diff.sizes()[0])) { - std::cout << y << ":"; + std::cout << y << ':'; for (const auto x : c10::irange(diff.sizes()[1])) { float diff_xy = diff[y][x].item(); if (diff_xy > maxDiff) { std::cout << std::setw(5) << x; } else { - std::cout << std::setw(5) << " "; + std::cout << std::setw(5) << ' '; } } std::cout << std::endl; @@ -3276,7 +3276,7 @@ TEST_F(VulkanAPITest, masked_fill_invalidinputs_exceptions) { void print_shape(const std::vector& shape) { for (const auto& num : shape) { - std::cout << num << " "; + std::cout << num << ' '; } } @@ -3367,7 +3367,7 @@ void test_masked_fill_scalar( print_shape(tmp_curr_input_shape); std::cout << "], and mask of shape ["; print_shape(tmp_curr_mask_shape); - std::cout << "]" << std::endl; + std::cout << ']' << std::endl; } ASSERT_TRUE(check); @@ -4542,9 +4542,9 @@ void test_softmax(const at::IntArrayRef shape, bool log_softmax = false) { if (!check) { std::cout << "Softmax test failed on axis " << dim << "for tensor dims {"; for (uint32_t place = 0; place < shape.size() - 1; place++) { - std::cout << shape[place] << " "; + std::cout << shape[place] << ' '; } - std::cout << shape.back() << "}" << std::endl; + std::cout << shape.back() << '}' << std::endl; showRtol(out_cpu, out_vulkan.cpu()); } ASSERT_TRUE(check); diff --git a/aten/src/ATen/test/vulkan_quantized_api_test.cpp b/aten/src/ATen/test/vulkan_quantized_api_test.cpp index 2829aed94de..2eff421a64c 100644 --- a/aten/src/ATen/test/vulkan_quantized_api_test.cpp +++ b/aten/src/ATen/test/vulkan_quantized_api_test.cpp @@ -95,7 +95,7 @@ void showRtol( std::cout << "Max Diff found is: " << diff.max().item() << std::endl; if (diff.sizes().size() == 2) { for (const auto y : c10::irange(diff.sizes()[0])) { - std::cout << y << ":"; + std::cout << y << ':'; for (const auto x : c10::irange(diff.sizes()[1])) { double diff_xy = diff[y][x].item(); if (diff_xy > maxDiff) { @@ -109,7 +109,7 @@ void showRtol( } } } else { - std::cout << std::setw(5) << " "; + std::cout << std::setw(5) << ' '; } } std::cout << std::endl; @@ -148,19 
+148,19 @@ using at::native::vulkan::api::utils::ivec4; using at::native::vulkan::api::utils::vec4; std::ostream& operator<<(std::ostream& os, const vec4& v) { - os << "(" << v.data[0u] << ", " << v.data[1u] << ", " << v.data[2u] << ", " - << v.data[3u] << ")"; + os << '(' << v.data[0u] << ", " << v.data[1u] << ", " << v.data[2u] << ", " + << v.data[3u] << ')'; return os; } std::ostream& operator<<(std::ostream& os, const ivec3& v) { - os << "(" << v.data[0u] << ", " << v.data[1u] << ", " << v.data[2u] << ")"; + os << '(' << v.data[0u] << ", " << v.data[1u] << ", " << v.data[2u] << ')'; return os; } std::ostream& operator<<(std::ostream& os, const ivec4& v) { - os << "(" << v.data[0u] << ", " << v.data[1u] << ", " << v.data[2u] << ", " - << v.data[3u] << ")"; + os << '(' << v.data[0u] << ", " << v.data[1u] << ", " << v.data[2u] << ", " + << v.data[3u] << ')'; return os; } @@ -3379,51 +3379,51 @@ bool _test_quantized_linear( showRtol(out_cpu_dequant, out_vk_to_cpu_dequant); } if (xpos != -1 && ypos != -1) { - std::cout << "\nFailure caused on row/col: " << ypos << "/" << xpos - << "\n"; + std::cout << "\nFailure caused on row/col: " << ypos << '/' << xpos + << '\n'; std::cout << "Input tensor scale: " << scale << " zerop: " << zero_point - << "\n"; - std::cout << "Input tensor row " << ypos << "\n"; + << '\n'; + std::cout << "Input tensor row " << ypos << '\n'; for (int i = 0; i < input_cpu.sizes()[1]; i++) { std::cout << input_cpu[ypos][i].item() << ", "; } - std::cout << "\n"; + std::cout << '\n'; std::cout << "Weight tensor scale: " << w_scale - << " zerop: " << w_zero_point << "\n"; - std::cout << "Weight tensor col " << xpos << "\n"; + << " zerop: " << w_zero_point << '\n'; + std::cout << "Weight tensor col " << xpos << '\n'; for (int i = 0; i < weight.sizes()[1]; i++) { std::cout << weight[xpos][i].item() << ", "; } - std::cout << "\n"; + std::cout << '\n'; std::cout << "Input tensor quantized row " << ypos << " with dtype " - << (input_quant_dtype_int8 ? "QInt8" : "QUInt8") << "\n"; + << (input_quant_dtype_int8 ? "QInt8" : "QUInt8") << '\n'; for (int i = 0; i < input_cpu.sizes()[1]; i++) { std::cout << input_cpu_quantized[ypos][i].item() << ", "; } - std::cout << "\n"; + std::cout << '\n'; std::cout << "Weight tensor quantized col " << xpos << " with dtype " - << (weight_quant_dtype_int8 ? "QInt8" : "QUInt8") << "\n"; + << (weight_quant_dtype_int8 ? 
"QInt8" : "QUInt8") << '\n'; for (int i = 0; i < weight.sizes()[1]; i++) { std::cout << weight_cpu_quantized[xpos][i].item() << ", "; } - std::cout << "\n"; + std::cout << '\n'; std::cout << "bias tensor\n"; for (int i = 0; i < bias.sizes()[0]; i++) { std::cout << bias[i].item() << ", "; } - std::cout << "\n"; + std::cout << '\n'; std::cout << "out_scale: " << out_scale - << " out_zero_point: " << out_zero_point << "\n"; + << " out_zero_point: " << out_zero_point << '\n'; std::cout << "cpu unmatched output: " - << out_cpu_dequant[ypos][xpos].item() << "\n"; + << out_cpu_dequant[ypos][xpos].item() << '\n'; std::cout << "vk unmatched output: " - << out_vk_to_cpu_dequant[ypos][xpos].item() << "\n"; + << out_vk_to_cpu_dequant[ypos][xpos].item() << '\n'; } } return check; diff --git a/c10/core/DispatchKeySet.cpp b/c10/core/DispatchKeySet.cpp index 107530e9e28..d1ec51b6a47 100644 --- a/c10/core/DispatchKeySet.cpp +++ b/c10/core/DispatchKeySet.cpp @@ -176,7 +176,7 @@ std::ostream& operator<<(std::ostream& os, DispatchKeySet ts) { os << k; first = false; } - os << ")"; + os << ')'; return os; } diff --git a/c10/core/TensorOptions.cpp b/c10/core/TensorOptions.cpp index d3282ae7114..b1a90cce30e 100644 --- a/c10/core/TensorOptions.cpp +++ b/c10/core/TensorOptions.cpp @@ -33,7 +33,7 @@ std::ostream& operator<<(std::ostream& stream, const TensorOptions& options) { } else { stream << "(nullopt)"; } - stream << ")"; + stream << ')'; return stream; } diff --git a/c10/cuda/CUDADeviceAssertionHost.cpp b/c10/cuda/CUDADeviceAssertionHost.cpp index 9b7c3568a98..08e657a4116 100644 --- a/c10/cuda/CUDADeviceAssertionHost.cpp +++ b/c10/cuda/CUDADeviceAssertionHost.cpp @@ -136,7 +136,7 @@ std::string c10_retrieve_device_side_assertion_info() { // Something failed, let's talk about that oss << failures_found << " CUDA device-side assertion failures were found on GPU #" - << device_num << "!" << std::endl; + << device_num << '!' 
<< std::endl; if (assertion_data_for_device.assertion_count > C10_CUDA_DSA_ASSERTION_COUNT) { oss << "But at least " << assertion_data_for_device.assertion_count @@ -151,17 +151,17 @@ std::string c10_retrieve_device_side_assertion_info() { oss << "Assertion failure " << i << std::endl; oss << " GPU assertion failure message = " << self.assertion_msg << std::endl; - oss << " File containing assertion = " << self.filename << ":" + oss << " File containing assertion = " << self.filename << ':' << self.line_number << std::endl; oss << " Device function containing assertion = " << self.function_name << std::endl; - oss << " Thread ID that failed assertion = [" << self.thread_id[0] << "," - << self.thread_id[1] << "," << self.thread_id[2] << "]" << std::endl; - oss << " Block ID that failed assertion = [" << self.block_id[0] << "," - << self.block_id[1] << "," << self.block_id[2] << "]" << std::endl; + oss << " Thread ID that failed assertion = [" << self.thread_id[0] << ',' + << self.thread_id[1] << ',' << self.thread_id[2] << ']' << std::endl; + oss << " Block ID that failed assertion = [" << self.block_id[0] << ',' + << self.block_id[1] << ',' << self.block_id[2] << ']' << std::endl; if (launch_info.generation_number == self.caller) { oss << " File containing kernel launch = " - << launch_info.launch_filename << ":" << launch_info.launch_linenum + << launch_info.launch_filename << ':' << launch_info.launch_linenum << std::endl; oss << " Function containing kernel launch = " << launch_info.launch_function << std::endl; @@ -175,7 +175,7 @@ std::string c10_retrieve_device_side_assertion_info() { if (launch_registry.gather_launch_stacktrace) { oss << "Launch stacktracing disabled." << std::endl; } else { - oss << "\n" << launch_info.launch_stacktrace << std::endl; + oss << '\n' << launch_info.launch_stacktrace << std::endl; } } else { oss << " CPU launch site info: Unavailable, the circular queue wrapped around. Increase `CUDAKernelLaunchRegistry::max_size`." 
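(Illustration, not part of the patch: a minimal self-contained sketch of the pattern the hunks above and below apply — writing single-character output as a char literal such as ')' instead of the string literal ")". Both forms print the same text; the char form selects operator<<(std::ostream&, char), which emits one character directly, while the const char* overload must first compute the string's length.)

// Standalone sketch; file/identifier names here are illustrative only.
#include <iostream>
#include <sstream>

int main() {
  std::ostringstream with_string;
  with_string << "(" << 3 << ", " << 4 << ")";   // string-literal style (before)

  std::ostringstream with_char;
  with_char << '(' << 3 << ", " << 4 << ')';     // char-literal style (after)

  // The observable output is identical; only the chosen inserter overload differs.
  std::cout << with_string.str() << '\n' << with_char.str() << '\n';
  return with_string.str() == with_char.str() ? 0 : 1;
}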
diff --git a/c10/test/core/DispatchKeySet_test.cpp b/c10/test/core/DispatchKeySet_test.cpp index a93461a041c..cdbdc150167 100644 --- a/c10/test/core/DispatchKeySet_test.cpp +++ b/c10/test/core/DispatchKeySet_test.cpp @@ -435,7 +435,7 @@ TEST(DispatchKeySet, TestFunctionalityDispatchKeyToString) { if (i > 0) { ASSERT_TRUE(res.find("Unknown") == std::string::npos) << i << " (before is " << toString(static_cast(i - 1)) - << ")"; + << ')'; } else { ASSERT_TRUE(res.find("Unknown") == std::string::npos) << i; } diff --git a/c10/test/util/Half_test.cpp b/c10/test/util/Half_test.cpp index a7681461510..33c77ead61f 100644 --- a/c10/test/util/Half_test.cpp +++ b/c10/test/util/Half_test.cpp @@ -96,10 +96,10 @@ TEST(HalfConversionTest, TestPorableConversion) { for (auto x : inputs) { auto target = c10::detail::fp16_ieee_to_fp32_value(x); EXPECT_EQ(halfbits2float(x), target) - << "Test failed for uint16 to float " << x << "\n"; + << "Test failed for uint16 to float " << x << '\n'; EXPECT_EQ( float2halfbits(target), c10::detail::fp16_ieee_from_fp32_value(target)) - << "Test failed for float to uint16" << target << "\n"; + << "Test failed for float to uint16" << target << '\n'; } } diff --git a/c10/test/util/logging_test.cpp b/c10/test/util/logging_test.cpp index b8fc81ddc6b..4587130564d 100644 --- a/c10/test/util/logging_test.cpp +++ b/c10/test/util/logging_test.cpp @@ -98,7 +98,7 @@ struct Noncopyable { }; std::ostream& operator<<(std::ostream& out, const Noncopyable& nc) { - out << "Noncopyable(" << nc.x << ")"; + out << "Noncopyable(" << nc.x << ')'; return out; } } // namespace diff --git a/c10/util/ArrayRef.h b/c10/util/ArrayRef.h index bbbb1d7288f..55900b6ee43 100644 --- a/c10/util/ArrayRef.h +++ b/c10/util/ArrayRef.h @@ -204,13 +204,13 @@ ArrayRef(const std::initializer_list&) -> ArrayRef; template std::ostream& operator<<(std::ostream& out, ArrayRef list) { int i = 0; - out << "["; + out << '['; for (const auto& e : list) { if (i++ > 0) out << ", "; out << e; } - out << "]"; + out << ']'; return out; } diff --git a/c10/util/Backtrace.cpp b/c10/util/Backtrace.cpp index 8838cafb029..29dbfe427ae 100644 --- a/c10/util/Backtrace.cpp +++ b/c10/util/Backtrace.cpp @@ -106,8 +106,8 @@ class GetBacktraceImpl { /*length*/ &length, /*status*/ &status); - os << " frame #" << idx++ << "\t" - << ((demangled != NULL && status == 0) ? demangled : symbol) << "[" + os << " frame #" << idx++ << '\t' + << ((demangled != NULL && status == 0) ? demangled : symbol) << '[' << addr << "]\t" << std::endl; } free(demangled); @@ -274,7 +274,7 @@ class GetBacktraceImpl { } else { // In the edge-case where we couldn't parse the frame string, we can // just use it directly (it may have a different format). - stream << symbols[frame_number] << "\n"; + stream << symbols[frame_number] << '\n'; } } @@ -413,8 +413,8 @@ class GetBacktraceImpl { << back_trace_[i_frame] << std::dec; if (with_symbol) { stream << std::setfill('0') << std::setw(16) << std::uppercase - << std::hex << p_symbol->Address << std::dec << " " << module - << "!" << p_symbol->Name; + << std::hex << p_symbol->Address << std::dec << ' ' << module + << '!' 
<< p_symbol->Name; } else { stream << " " << module << "!"; } @@ -424,7 +424,7 @@ class GetBacktraceImpl { } else { stream << " @ "; } - stream << "]" << std::endl; + stream << ']' << std::endl; } return stream.str(); diff --git a/c10/util/Exception.cpp b/c10/util/Exception.cpp index 1928c2c175c..50f423f9179 100644 --- a/c10/util/Exception.cpp +++ b/c10/util/Exception.cpp @@ -44,7 +44,7 @@ std::string Error::compute_what(bool include_backtrace) const { if (context_.size() == 1) { // Fold error and context in one line - oss << " (" << context_[0] << ")"; + oss << " (" << context_[0] << ')'; } else { for (const auto& c : context_) { oss << "\n " << c; @@ -52,7 +52,7 @@ std::string Error::compute_what(bool include_backtrace) const { } if (include_backtrace && backtrace_) { - oss << "\n" << backtrace_->get(); + oss << '\n' << backtrace_->get(); } return oss.str(); @@ -247,7 +247,7 @@ void WarningHandler::process(const Warning& warning) { LOG_AT_FILE_LINE( WARNING, warning.source_location().file, warning.source_location().line) << "Warning: " << warning.msg() << " (function " - << warning.source_location().function << ")"; + << warning.source_location().function << ')'; } std::string GetExceptionString(const std::exception& e) { diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp index 4bf96b1b680..298503dfbe3 100644 --- a/c10/util/Logging.cpp +++ b/c10/util/Logging.cpp @@ -473,12 +473,12 @@ MessageLogger::MessageLogger( if (GLOBAL_RANK != -1) { stream_ << "[rank" << GLOBAL_RANK << "]:"; } - stream_ << "[" << CAFFE2_SEVERITY_PREFIX[std::min(4, GLOG_FATAL - severity_)] + stream_ << '[' << CAFFE2_SEVERITY_PREFIX[std::min(4, GLOG_FATAL - severity_)] << (timeinfo->tm_mon + 1) * 100 + timeinfo->tm_mday - << std::setfill('0') << " " << std::setw(2) << timeinfo->tm_hour - << ":" << std::setw(2) << timeinfo->tm_min << ":" << std::setw(2) - << timeinfo->tm_sec << "." << std::setw(9) << ns << " " - << c10::detail::StripBasename(std::string(file)) << ":" << line + << std::setfill('0') << ' ' << std::setw(2) << timeinfo->tm_hour + << ':' << std::setw(2) << timeinfo->tm_min << ':' << std::setw(2) + << timeinfo->tm_sec << '.' << std::setw(9) << ns << ' ' + << c10::detail::StripBasename(std::string(file)) << ':' << line << "] "; } @@ -488,7 +488,7 @@ MessageLogger::~MessageLogger() noexcept(false) { // Nothing needs to be logged. 
return; } - stream_ << "\n"; + stream_ << '\n'; #ifdef ANDROID static const int android_log_levels[] = { ANDROID_LOG_FATAL, // LOG_FATAL diff --git a/c10/util/SmallVector.h b/c10/util/SmallVector.h index d02c9380a56..d47f37cdf7e 100644 --- a/c10/util/SmallVector.h +++ b/c10/util/SmallVector.h @@ -1412,13 +1412,13 @@ inline size_t capacity_in_bytes(const SmallVector& X) { template std::ostream& operator<<(std::ostream& out, const SmallVector& list) { int i = 0; - out << "["; + out << '['; for (auto e : list) { if (i++ > 0) out << ", "; out << e; } - out << "]"; + out << ']'; return out; } diff --git a/c10/util/StringUtil.cpp b/c10/util/StringUtil.cpp index 063a8fc93ea..6fae2f004cc 100644 --- a/c10/util/StringUtil.cpp +++ b/c10/util/StringUtil.cpp @@ -79,7 +79,7 @@ std::ostream& _str(std::ostream& ss, const std::wstring& wString) { } // namespace detail std::ostream& operator<<(std::ostream& out, const SourceLocation& loc) { - out << loc.function << " at " << loc.file << ":" << loc.line; + out << loc.function << " at " << loc.file << ':' << loc.line; return out; } diff --git a/c10/util/StringUtil.h b/c10/util/StringUtil.h index cbc6f4ec336..de241bc9f7c 100644 --- a/c10/util/StringUtil.h +++ b/c10/util/StringUtil.h @@ -170,7 +170,7 @@ inline bool isPrint(char s) { } inline void printQuotedString(std::ostream& stmt, const std::string_view str) { - stmt << "\""; + stmt << '"'; for (auto s : str) { switch (s) { case '\\': @@ -224,7 +224,7 @@ inline void printQuotedString(std::ostream& stmt, const std::string_view str) { break; } } - stmt << "\""; + stmt << '"'; } template diff --git a/c10/util/signal_handler.cpp b/c10/util/signal_handler.cpp index 831c0d02452..bfb04e1ccbc 100644 --- a/c10/util/signal_handler.cpp +++ b/c10/util/signal_handler.cpp @@ -223,7 +223,7 @@ void FatalSignalHandler::fatalSignalHandler(int signum) { // a single thread that wouldn't receive the SIGUSR2 if (std::cv_status::timeout == writingCond.wait_for(ul, 2s)) { if (!signalReceived) { - std::cerr << "signal lost waiting for stacktrace " << pid << ":" + std::cerr << "signal lost waiting for stacktrace " << pid << ':' << tid << '\n'; break; } diff --git a/c10/util/sparse_bitset.h b/c10/util/sparse_bitset.h index c8eb0df47f6..e7ad1db06d6 100644 --- a/c10/util/sparse_bitset.h +++ b/c10/util/sparse_bitset.h @@ -877,7 +877,7 @@ std::ostream& operator<<( std::ostream& stream, const SparseBitVector& vec) { bool first = true; - stream << "{"; + stream << '{'; for (auto el : vec) { if (first) { first = false; @@ -886,7 +886,7 @@ std::ostream& operator<<( } stream << el; } - stream << "}"; + stream << '}'; return stream; } diff --git a/torch/csrc/DataLoader.cpp b/torch/csrc/DataLoader.cpp index a6ad3f00b27..31cec72d8a1 100644 --- a/torch/csrc/DataLoader.cpp +++ b/torch/csrc/DataLoader.cpp @@ -61,7 +61,7 @@ static void setSignalHandler( sigaction(signal, &sa, old_sa_ptr) != 0) { std::ostringstream oss; oss << "An error occurred while setting handler for " << strsignal(signal) - << "."; + << '.'; TORCH_CHECK(false, oss.str()); } } diff --git a/torch/csrc/Device.cpp b/torch/csrc/Device.cpp index f3babe4cd72..da7b287369d 100644 --- a/torch/csrc/Device.cpp +++ b/torch/csrc/Device.cpp @@ -29,14 +29,14 @@ PyObject* THPDevice_New(const at::Device& device) { static PyObject* THPDevice_repr(THPDevice* self) { std::ostringstream oss; - oss << "device(type=\'" << self->device.type() << "\'"; + oss << "device(type=\'" << self->device.type() << '\''; if (self->device.has_index()) { // `self->device.index()` returns uint8_t which is treated as 
ascii while // printing, hence casting it to uint16_t. // https://stackoverflow.com/questions/19562103/uint8-t-cant-be-printed-with-cout oss << ", index=" << static_cast(self->device.index()); } - oss << ")"; + oss << ')'; return THPUtils_packString(oss.str().c_str()); } diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp index adf1c8c4c4d..61ef99e8086 100644 --- a/torch/csrc/Module.cpp +++ b/torch/csrc/Module.cpp @@ -212,8 +212,8 @@ static PyObject* THPModule_initExtension( } auto frame_id = s_tb[idx]; const auto& frame = s_tbs.all_frames.at(frame_id); - oss << "#" << idx << " " << frame.funcname << " from " << frame.filename - << ":" << frame.lineno << '\n'; + oss << '#' << idx << ' ' << frame.funcname << " from " << frame.filename + << ':' << frame.lineno << '\n'; } return oss.str(); }); @@ -2772,8 +2772,8 @@ Call this whenever a new thread is created in order to propagate values from py_module.def("_dump_local_tls_set", []() { auto local_keyset = c10::impl::tls_local_dispatch_key_set(); - std::cout << "Included: " << toString(local_keyset.included_) << "\n"; - std::cout << "Excluded: " << toString(local_keyset.excluded_) << "\n"; + std::cout << "Included: " << toString(local_keyset.included_) << '\n'; + std::cout << "Excluded: " << toString(local_keyset.excluded_) << '\n'; }); py_module.def( diff --git a/torch/csrc/TypeInfo.cpp b/torch/csrc/TypeInfo.cpp index 6874374eff7..de23b795360 100644 --- a/torch/csrc/TypeInfo.cpp +++ b/torch/csrc/TypeInfo.cpp @@ -254,7 +254,7 @@ static PyObject* THPFInfo_str(THPFInfo* self) { << PyFloat_AsDouble(THPFInfo_smallest_normal(self, nullptr)); oss << ", tiny=" << PyFloat_AsDouble(THPFInfo_tiny(self, nullptr)); if (dtypeStr != nullptr) { - oss << ", dtype=" << PyUnicode_AsUTF8(dtypeStr) << ")"; + oss << ", dtype=" << PyUnicode_AsUTF8(dtypeStr) << ')'; } return !PyErr_Occurred() ? THPUtils_packString(oss.str().c_str()) : nullptr; } @@ -266,7 +266,7 @@ static PyObject* THPIInfo_str(THPIInfo* self) { oss << "iinfo(min=" << PyLong_AsDouble(THPIInfo_min(self, nullptr)); oss << ", max=" << PyLong_AsDouble(THPIInfo_max(self, nullptr)); if (dtypeStr) { - oss << ", dtype=" << PyUnicode_AsUTF8(dtypeStr) << ")"; + oss << ", dtype=" << PyUnicode_AsUTF8(dtypeStr) << ')'; } return !PyErr_Occurred() ? 
THPUtils_packString(oss.str().c_str()) : nullptr; diff --git a/torch/csrc/api/include/torch/detail/TensorDataContainer.h b/torch/csrc/api/include/torch/detail/TensorDataContainer.h index 9485af1d297..152672c7f3f 100644 --- a/torch/csrc/api/include/torch/detail/TensorDataContainer.h +++ b/torch/csrc/api/include/torch/detail/TensorDataContainer.h @@ -271,7 +271,7 @@ struct TensorDataContainer { "TensorDataContainer_pretty_print_scalar", [&] { stream << scalar_.to(); }); } else if (is_init_list()) { - stream << "{"; + stream << '{'; for (const TensorDataContainer* it = init_list_.begin(); it != init_list_.end(); it++) { @@ -279,9 +279,9 @@ struct TensorDataContainer { if (std::next(it) != init_list_.end()) stream << ", "; } - stream << "}"; + stream << '}'; } else if (is_tensor()) { - stream << "{"; + stream << '{'; for (const auto i : c10::irange(tensor_.sizes()[0])) { AT_DISPATCH_ALL_TYPES_AND3( at::kBool, @@ -293,7 +293,7 @@ struct TensorDataContainer { if (i != tensor_.sizes()[0] - 1) stream << ", "; } - stream << "}"; + stream << '}'; } else { TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type"); } diff --git a/torch/csrc/api/include/torch/nn/modules/batchnorm.h b/torch/csrc/api/include/torch/nn/modules/batchnorm.h index 8437ffd7afb..a0456578da0 100644 --- a/torch/csrc/api/include/torch/nn/modules/batchnorm.h +++ b/torch/csrc/api/include/torch/nn/modules/batchnorm.h @@ -145,7 +145,7 @@ class BatchNormImplBase : public NormImplBase { stream << ", " << "affine=" << this->options.affine() << ", " << "track_running_stats=" << this->options.track_running_stats() - << ")"; + << ')'; } }; diff --git a/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h b/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h index 008d790fdec..72cc777cd5c 100644 --- a/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h +++ b/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h @@ -28,13 +28,13 @@ class ParameterDictImpl : public Cloneable { void pretty_print(std::ostream& stream) const override { stream << "torch::nn::ParameterDict(" << '\n'; for (const auto& pair : parameters_) { - stream << "(" << pair.key() << ")" - << ": Parameter containing: [" << pair.value().scalar_type() - << " of size " << pair.value().sizes() << "]"; + stream << '(' << pair.key() << ')' << ": Parameter containing: [" + << pair.value().scalar_type() << " of size " + << pair.value().sizes() << ']'; ; stream << '\n'; } - stream << ")"; + stream << ')'; } /// Insert the parameter along with the key into ParameterDict diff --git a/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h b/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h index 198172ab564..c4221571540 100644 --- a/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h +++ b/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h @@ -36,13 +36,13 @@ class ParameterListImpl : public Cloneable { void pretty_print(std::ostream& stream) const override { stream << "torch::nn::ParameterList(" << '\n'; for (const auto& pair : parameters_) { - stream << "(" << pair.key() << ")" - << ": Parameter containing: [" << pair.value().scalar_type() - << " of size " << pair.value().sizes() << "]"; + stream << '(' << pair.key() << ')' << ": Parameter containing: [" + << pair.value().scalar_type() << " of size " + << pair.value().sizes() << ']'; ; stream << '\n'; } - stream << ")"; + stream << ')'; } /// push the a given parameter at the end of the list diff --git 
a/torch/csrc/api/include/torch/nn/modules/conv.h b/torch/csrc/api/include/torch/nn/modules/conv.h index 8c5f1f3e391..56fb6023ed4 100644 --- a/torch/csrc/api/include/torch/nn/modules/conv.h +++ b/torch/csrc/api/include/torch/nn/modules/conv.h @@ -113,8 +113,8 @@ class ConvNdImpl : public torch::nn::Cloneable { /// Pretty prints the `Conv{1,2,3}d` module into the given `stream`. void pretty_print(std::ostream& stream) const override { - stream << "torch::nn::Conv" << D << "d" - << "(" << options.in_channels() << ", " << options.out_channels() + stream << "torch::nn::Conv" << D << 'd' << '(' << options.in_channels() + << ", " << options.out_channels() << ", kernel_size=" << options.kernel_size() << ", stride=" << options.stride(); std::visit( @@ -143,7 +143,7 @@ class ConvNdImpl : public torch::nn::Cloneable { stream << ", padding_mode=" << enumtype::get_enum_name(options.padding_mode()); } - stream << ")"; + stream << ')'; } /// The options with which this `Module` was constructed. @@ -278,8 +278,8 @@ class ConvTransposeNdImpl : public ConvNdImpl { /// Pretty prints the `ConvTranspose{1,2,3}d` module into the given `stream`. void pretty_print(std::ostream& stream) const override { - stream << "torch::nn::ConvTranspose" << D << "d" - << "(" << this->options.in_channels() << ", " + stream << "torch::nn::ConvTranspose" << D << 'd' << '(' + << this->options.in_channels() << ", " << this->options.out_channels() << ", kernel_size=" << this->options.kernel_size() << ", stride=" << this->options.stride(); @@ -303,7 +303,7 @@ class ConvTransposeNdImpl : public ConvNdImpl { stream << ", padding_mode=" << enumtype::get_enum_name(this->options.padding_mode()); } - stream << ")"; + stream << ')'; } protected: diff --git a/torch/csrc/api/include/torch/nn/modules/instancenorm.h b/torch/csrc/api/include/torch/nn/modules/instancenorm.h index 228f181715f..492aba8e4e2 100644 --- a/torch/csrc/api/include/torch/nn/modules/instancenorm.h +++ b/torch/csrc/api/include/torch/nn/modules/instancenorm.h @@ -53,7 +53,7 @@ class InstanceNormImpl << "momentum=" << this->options.momentum() << ", " << "affine=" << this->options.affine() << ", " << "track_running_stats=" << this->options.track_running_stats() - << ")"; + << ')'; } }; diff --git a/torch/csrc/api/include/torch/nn/modules/pooling.h b/torch/csrc/api/include/torch/nn/modules/pooling.h index 17ed12f4cc0..4f08bf31031 100644 --- a/torch/csrc/api/include/torch/nn/modules/pooling.h +++ b/torch/csrc/api/include/torch/nn/modules/pooling.h @@ -232,8 +232,8 @@ class TORCH_API AdaptiveMaxPoolImpl : public torch::nn::Cloneable { /// Pretty prints the `AdaptiveMaxPool{1,2,3}d` module into the given /// `stream`. void pretty_print(std::ostream& stream) const override { - stream << "torch::nn::AdaptiveMaxPool" << D << "d" - << "(output_size=" << options.output_size() << ")"; + stream << "torch::nn::AdaptiveMaxPool" << D << 'd' + << "(output_size=" << options.output_size() << ')'; } /// The options with which this `Module` was constructed. @@ -365,8 +365,8 @@ class TORCH_API AdaptiveAvgPoolImpl : public torch::nn::Cloneable { /// Pretty prints the `AdaptiveAvgPool{1,2,3}d` module into the given /// `stream`. void pretty_print(std::ostream& stream) const override { - stream << "torch::nn::AdaptiveAvgPool" << D << "d" - << "(output_size=" << options.output_size() << ")"; + stream << "torch::nn::AdaptiveAvgPool" << D << 'd' + << "(output_size=" << options.output_size() << ')'; } /// The options with which this `Module` was constructed. 
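(Illustration, not part of the patch: a hypothetical minimal module — not torch code — sketching the pretty_print(std::ostream&) convention the torch::nn hunks above follow, with the single-character delimiters written as char literals.)

// Hypothetical example; ToyPool and its options are made up for illustration.
#include <iostream>
#include <ostream>

struct ToyPoolOptions {
  int kernel_size = 2;
  int stride = 2;
};

struct ToyPool {
  ToyPoolOptions options;
  // Streams "ToyPool(kernel_size=..., stride=...)" in the same style as the
  // pretty_print overrides in the surrounding hunks.
  void pretty_print(std::ostream& stream) const {
    stream << "ToyPool" << '(' << "kernel_size=" << options.kernel_size
           << ", stride=" << options.stride << ')';
  }
};

int main() {
  ToyPool pool;
  pool.pretty_print(std::cout);
  std::cout << '\n';  // prints: ToyPool(kernel_size=2, stride=2)
}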
diff --git a/torch/csrc/api/src/nn/module.cpp b/torch/csrc/api/src/nn/module.cpp index 563ed4789cb..5dbc36b7dd5 100644 --- a/torch/csrc/api/src/nn/module.cpp +++ b/torch/csrc/api/src/nn/module.cpp @@ -355,11 +355,11 @@ void Module::pretty_print_recursive( stream << "(\n"; const std::string next_indentation = indentation + " "; for (const auto& child : children_) { - stream << next_indentation << "(" << child.key() << "): "; + stream << next_indentation << '(' << child.key() << "): "; child.value()->pretty_print_recursive(stream, next_indentation); stream << '\n'; } - stream << indentation << ")"; + stream << indentation << ')'; } } diff --git a/torch/csrc/api/src/nn/modules/activation.cpp b/torch/csrc/api/src/nn/modules/activation.cpp index 68949f3fb49..5144ea51ece 100644 --- a/torch/csrc/api/src/nn/modules/activation.cpp +++ b/torch/csrc/api/src/nn/modules/activation.cpp @@ -21,7 +21,7 @@ void ELUImpl::pretty_print(std::ostream& stream) const { if (options.inplace()) { stream << std::boolalpha << ", inplace=" << options.inplace(); } - stream << ")"; + stream << ')'; } // ============================================================================ @@ -39,7 +39,7 @@ void SELUImpl::pretty_print(std::ostream& stream) const { if (options.inplace()) { stream << std::boolalpha << "inplace=" << options.inplace(); } - stream << ")"; + stream << ')'; } // ============================================================================ @@ -55,7 +55,7 @@ void HardshrinkImpl::reset() {} void HardshrinkImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::Hardshrink(" << options.lambda() - << ")"; + << ')'; } // ============================================================================ @@ -86,7 +86,7 @@ void HardtanhImpl::pretty_print(std::ostream& stream) const { if (options.inplace()) { stream << std::boolalpha << ", inplace=" << options.inplace(); } - stream << ")"; + stream << ')'; } // ============================================================================ @@ -107,7 +107,7 @@ void LeakyReLUImpl::pretty_print(std::ostream& stream) const { if (options.inplace()) { stream << std::boolalpha << ", inplace=" << options.inplace(); } - stream << ")"; + stream << ')'; } // ============================================================================ @@ -129,7 +129,7 @@ SoftmaxImpl::SoftmaxImpl(const SoftmaxOptions& options_) : options(options_) {} void SoftmaxImpl::reset() {} void SoftmaxImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::Softmax(dim=" << options.dim() << ")"; + stream << "torch::nn::Softmax(dim=" << options.dim() << ')'; } Tensor SoftmaxImpl::forward(const Tensor& input) { @@ -143,7 +143,7 @@ SoftminImpl::SoftminImpl(const SoftminOptions& options_) : options(options_) {} void SoftminImpl::reset() {} void SoftminImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::Softmin(dim=" << options.dim() << ")"; + stream << "torch::nn::Softmin(dim=" << options.dim() << ')'; } Tensor SoftminImpl::forward(const Tensor& input) { @@ -158,7 +158,7 @@ LogSoftmaxImpl::LogSoftmaxImpl(const LogSoftmaxOptions& options_) void LogSoftmaxImpl::reset() {} void LogSoftmaxImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::LogSoftmax(dim=" << options.dim() << ")"; + stream << "torch::nn::LogSoftmax(dim=" << options.dim() << ')'; } Tensor LogSoftmaxImpl::forward(const Tensor& input) { @@ -197,7 +197,7 @@ void PReLUImpl::reset() { void PReLUImpl::pretty_print(std::ostream& stream) const { stream << 
"torch::nn::PReLU(num_parameters=" << options.num_parameters() - << ")"; + << ')'; } // ============================================================================ @@ -215,7 +215,7 @@ void ReLUImpl::pretty_print(std::ostream& stream) const { if (options.inplace()) { stream << std::boolalpha << "inplace=" << options.inplace(); } - stream << ")"; + stream << ')'; } // ============================================================================ @@ -233,7 +233,7 @@ void ReLU6Impl::pretty_print(std::ostream& stream) const { if (options.inplace()) { stream << std::boolalpha << "inplace=" << options.inplace(); } - stream << ")"; + stream << ')'; } // ============================================================================ @@ -257,7 +257,7 @@ void RReLUImpl::pretty_print(std::ostream& stream) const { if (options.inplace()) { stream << std::boolalpha << ", inplace=" << options.inplace(); } - stream << ")"; + stream << ')'; } // ============================================================================ @@ -275,7 +275,7 @@ void CELUImpl::pretty_print(std::ostream& stream) const { if (options.inplace()) { stream << std::boolalpha << ", inplace=" << options.inplace(); } - stream << ")"; + stream << ')'; } // ============================================================================ @@ -289,7 +289,7 @@ Tensor GLUImpl::forward(const Tensor& input) { void GLUImpl::reset() {} void GLUImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::GLU(dim=" << options.dim() << ")"; + stream << "torch::nn::GLU(dim=" << options.dim() << ')'; } // ============================================================================ @@ -355,7 +355,7 @@ void SoftplusImpl::reset() {} void SoftplusImpl::pretty_print(std::ostream& stream) const { stream << "torch::nn::Softplus(beta=" << options.beta() - << ", threshold=" << options.threshold() << ")"; + << ", threshold=" << options.threshold() << ')'; } // ============================================================================ @@ -370,7 +370,7 @@ Tensor SoftshrinkImpl::forward(const Tensor& input) { void SoftshrinkImpl::reset() {} void SoftshrinkImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::Softshrink(" << options.lambda() << ")"; + stream << "torch::nn::Softshrink(" << options.lambda() << ')'; } // ============================================================================ @@ -430,7 +430,7 @@ void ThresholdImpl::pretty_print(std::ostream& stream) const { if (options.inplace()) { stream << std::boolalpha << ", inplace=" << options.inplace(); } - stream << ")"; + stream << ')'; } // ============================================================================ diff --git a/torch/csrc/api/src/nn/modules/distance.cpp b/torch/csrc/api/src/nn/modules/distance.cpp index d8e7fa8ac40..7b45deadac9 100644 --- a/torch/csrc/api/src/nn/modules/distance.cpp +++ b/torch/csrc/api/src/nn/modules/distance.cpp @@ -12,7 +12,7 @@ void CosineSimilarityImpl::reset() {} void CosineSimilarityImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::CosineSimilarity" - << "(dim=" << options.dim() << ", eps=" << options.eps() << ")"; + << "(dim=" << options.dim() << ", eps=" << options.eps() << ')'; } Tensor CosineSimilarityImpl::forward(const Tensor& x1, const Tensor& x2) { @@ -30,7 +30,7 @@ void PairwiseDistanceImpl::reset() {} void PairwiseDistanceImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::PairwiseDistance" << "(p=" << options.p() << ", eps=" << options.eps() - << ", 
keepdim=" << options.keepdim() << ")"; + << ", keepdim=" << options.keepdim() << ')'; } Tensor PairwiseDistanceImpl::forward(const Tensor& x1, const Tensor& x2) { diff --git a/torch/csrc/api/src/nn/modules/dropout.cpp b/torch/csrc/api/src/nn/modules/dropout.cpp index 2b7c5aa3a28..08433bf3631 100644 --- a/torch/csrc/api/src/nn/modules/dropout.cpp +++ b/torch/csrc/api/src/nn/modules/dropout.cpp @@ -19,7 +19,7 @@ Tensor DropoutImpl::forward(Tensor input) { void DropoutImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::Dropout(p=" << options.p() - << ", inplace=" << options.inplace() << ")"; + << ", inplace=" << options.inplace() << ')'; } // ============================================================================ @@ -31,7 +31,7 @@ Tensor Dropout2dImpl::forward(Tensor input) { void Dropout2dImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::Dropout2d(p=" << options.p() - << ", inplace=" << options.inplace() << ")"; + << ", inplace=" << options.inplace() << ')'; } // ============================================================================ @@ -43,7 +43,7 @@ Tensor Dropout3dImpl::forward(Tensor input) { void Dropout3dImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::Dropout3d(p=" << options.p() - << ", inplace=" << options.inplace() << ")"; + << ", inplace=" << options.inplace() << ')'; } // ============================================================================ @@ -55,7 +55,7 @@ Tensor AlphaDropoutImpl::forward(const Tensor& input) { void AlphaDropoutImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::AlphaDropout(p=" << options.p() - << ", inplace=" << options.inplace() << ")"; + << ", inplace=" << options.inplace() << ')'; } // ============================================================================ @@ -67,7 +67,7 @@ Tensor FeatureAlphaDropoutImpl::forward(const Tensor& input) { void FeatureAlphaDropoutImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::FeatureAlphaDropout(p=" << options.p() - << ", inplace=" << options.inplace() << ")"; + << ", inplace=" << options.inplace() << ')'; } } // namespace torch::nn diff --git a/torch/csrc/api/src/nn/modules/embedding.cpp b/torch/csrc/api/src/nn/modules/embedding.cpp index b9fededfd73..e704e71c97e 100644 --- a/torch/csrc/api/src/nn/modules/embedding.cpp +++ b/torch/csrc/api/src/nn/modules/embedding.cpp @@ -76,7 +76,7 @@ void EmbeddingImpl::pretty_print(std::ostream& stream) const { if (options.sparse()) { stream << ", sparse=" << std::boolalpha << options.sparse(); } - stream << ")"; + stream << ')'; } torch::Tensor EmbeddingImpl::forward(const Tensor& input) { @@ -181,6 +181,6 @@ void EmbeddingBagImpl::pretty_print(std::ostream& stream) const { if (padding_idx_opt.has_value()) { stream << ", padding_idx=" << padding_idx_opt.value(); } - stream << ")"; + stream << ')'; } } // namespace torch::nn diff --git a/torch/csrc/api/src/nn/modules/fold.cpp b/torch/csrc/api/src/nn/modules/fold.cpp index 32c83ca6e1b..43b07b84fcf 100644 --- a/torch/csrc/api/src/nn/modules/fold.cpp +++ b/torch/csrc/api/src/nn/modules/fold.cpp @@ -17,7 +17,7 @@ void FoldImpl::pretty_print(std::ostream& stream) const { << ", kernel_size=" << options.kernel_size() << ", dilation=" << options.dilation() << ", padding=" << options.padding() << ", stride=" << options.stride() - << ")"; + << ')'; } Tensor FoldImpl::forward(const Tensor& input) { @@ -40,7 +40,7 @@ void 
UnfoldImpl::pretty_print(std::ostream& stream) const { stream << "torch::nn::Unfold(kernel_size=" << options.kernel_size() << ", dilation=" << options.dilation() << ", padding=" << options.padding() << ", stride=" << options.stride() - << ")"; + << ')'; } Tensor UnfoldImpl::forward(const Tensor& input) { diff --git a/torch/csrc/api/src/nn/modules/linear.cpp b/torch/csrc/api/src/nn/modules/linear.cpp index 0b31e3aa037..6ed92d2998c 100644 --- a/torch/csrc/api/src/nn/modules/linear.cpp +++ b/torch/csrc/api/src/nn/modules/linear.cpp @@ -55,7 +55,7 @@ void LinearImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::Linear(in_features=" << options.in_features() << ", out_features=" << options.out_features() - << ", bias=" << options.bias() << ")"; + << ", bias=" << options.bias() << ')'; } Tensor LinearImpl::forward(const Tensor& input) { @@ -70,7 +70,7 @@ void FlattenImpl::reset() {} void FlattenImpl::pretty_print(std::ostream& stream) const { stream << "torch::nn::Flatten(start_dim=" << options.start_dim() - << ", end_dim=" << options.end_dim() << ")"; + << ", end_dim=" << options.end_dim() << ')'; } Tensor FlattenImpl::forward(const Tensor& input) { @@ -161,7 +161,7 @@ void BilinearImpl::pretty_print(std::ostream& stream) const { << "torch::nn::Bilinear(in1_features=" << options.in1_features() << ", in2_features=" << options.in2_features() << ", out_features=" << options.out_features() - << ", bias=" << options.bias() << ")"; + << ", bias=" << options.bias() << ')'; } Tensor BilinearImpl::forward(const Tensor& input1, const Tensor& input2) { diff --git a/torch/csrc/api/src/nn/modules/loss.cpp b/torch/csrc/api/src/nn/modules/loss.cpp index 7cae60ac992..6ea9d76af81 100644 --- a/torch/csrc/api/src/nn/modules/loss.cpp +++ b/torch/csrc/api/src/nn/modules/loss.cpp @@ -74,7 +74,7 @@ HingeEmbeddingLossImpl::HingeEmbeddingLossImpl( void HingeEmbeddingLossImpl::reset() {} void HingeEmbeddingLossImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::HingeEmbeddingLoss(margin=" << options.margin() << ")"; + stream << "torch::nn::HingeEmbeddingLoss(margin=" << options.margin() << ')'; } Tensor HingeEmbeddingLossImpl::forward( @@ -104,7 +104,7 @@ void MultiMarginLossImpl::pretty_print(std::ostream& stream) const { stream << "torch::nn::MultiMarginLoss(p=" << options.p() << ", margin=" << options.margin() << ", weight=" << options.weight() << ", reduction=" << enumtype::get_enum_name(options.reduction()) - << ")"; + << ')'; } Tensor MultiMarginLossImpl::forward(const Tensor& input, const Tensor& target) { @@ -126,7 +126,7 @@ CosineEmbeddingLossImpl::CosineEmbeddingLossImpl( void CosineEmbeddingLossImpl::reset() {} void CosineEmbeddingLossImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::CosineEmbeddingLoss(margin=" << options.margin() << ")"; + stream << "torch::nn::CosineEmbeddingLoss(margin=" << options.margin() << ')'; } Tensor CosineEmbeddingLossImpl::forward( @@ -169,7 +169,7 @@ void TripletMarginLossImpl::reset() {} void TripletMarginLossImpl::pretty_print(std::ostream& stream) const { stream << "torch::nn::TripletMarginLoss(margin=" << options.margin() << ", p=" << options.p() << ", eps=" << options.eps() << std::boolalpha - << ", swap=" << options.swap() << ")"; + << ", swap=" << options.swap() << ')'; } Tensor TripletMarginLossImpl::forward( @@ -199,7 +199,7 @@ void TripletMarginWithDistanceLossImpl::pretty_print( std::ostream& stream) const { stream << "torch::nn::TripletMarginWithDistanceLoss(margin=" << options.margin() 
<< std::boolalpha << ", swap=" << options.swap() - << ")"; + << ')'; } Tensor TripletMarginWithDistanceLossImpl::forward( diff --git a/torch/csrc/api/src/nn/modules/normalization.cpp b/torch/csrc/api/src/nn/modules/normalization.cpp index 41129c89909..72957356a3d 100644 --- a/torch/csrc/api/src/nn/modules/normalization.cpp +++ b/torch/csrc/api/src/nn/modules/normalization.cpp @@ -40,7 +40,7 @@ void LayerNormImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::LayerNorm(" << torch::IntArrayRef(options.normalized_shape()) << ", eps=" << options.eps() - << ", elementwise_affine=" << options.elementwise_affine() << ")"; + << ", elementwise_affine=" << options.elementwise_affine() << ')'; } torch::Tensor LayerNormImpl::forward(const Tensor& input) { @@ -64,7 +64,7 @@ void LocalResponseNormImpl::reset() {} void LocalResponseNormImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::LocalResponseNorm(" << options.size() << ", alpha=" << options.alpha() << ", beta=" << options.beta() - << ", k=" << options.k() << ")"; + << ", k=" << options.k() << ')'; } // ============================================================================ @@ -74,7 +74,7 @@ void CrossMapLRN2dImpl::reset() {} void CrossMapLRN2dImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::CrossMapLRN2d(" << options.size() << ", alpha=" << options.alpha() << ", beta=" << options.beta() - << ", k=" << options.k() << ")"; + << ", k=" << options.k() << ')'; } torch::Tensor CrossMapLRN2dImpl::forward(const torch::Tensor& input) { @@ -115,7 +115,7 @@ torch::Tensor GroupNormImpl::forward(const Tensor& input) { void GroupNormImpl::pretty_print(std::ostream& stream) const { stream << std::boolalpha << "torch::nn::GroupNorm(" << options.num_groups() << ", " << options.num_channels() << ", eps=" << options.eps() - << ", affine=" << options.affine() << ")"; + << ", affine=" << options.affine() << ')'; } } // namespace torch::nn diff --git a/torch/csrc/api/src/nn/modules/padding.cpp b/torch/csrc/api/src/nn/modules/padding.cpp index d992bf696d0..2e3212f7c94 100644 --- a/torch/csrc/api/src/nn/modules/padding.cpp +++ b/torch/csrc/api/src/nn/modules/padding.cpp @@ -21,8 +21,8 @@ Tensor ReflectionPadImpl::forward(const Tensor& input) { template void ReflectionPadImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::ReflectionPad" << D << "d" - << "(padding=" << options.padding() << ")"; + stream << "torch::nn::ReflectionPad" << D << 'd' + << "(padding=" << options.padding() << ')'; } template class ReflectionPadImpl<1, ReflectionPad1dImpl>; @@ -46,8 +46,8 @@ Tensor ReplicationPadImpl::forward(const Tensor& input) { template void ReplicationPadImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::ReplicationPad" << D << "d" - << "(padding=" << options.padding() << ")"; + stream << "torch::nn::ReplicationPad" << D << 'd' + << "(padding=" << options.padding() << ')'; } template class ReplicationPadImpl<1, ReplicationPad1dImpl>; @@ -70,8 +70,8 @@ Tensor ZeroPadImpl::forward(const Tensor& input) { template void ZeroPadImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::ZeroPad" << D << "d" - << "(padding=" << options.padding() << ")"; + stream << "torch::nn::ZeroPad" << D << 'd' << "(padding=" << options.padding() + << ')'; } template class ZeroPadImpl<1, ZeroPad1dImpl>; @@ -96,9 +96,9 @@ Tensor ConstantPadImpl::forward(const Tensor& input) { template void 
ConstantPadImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::ConstantPad" << D << "d" + stream << "torch::nn::ConstantPad" << D << 'd' << "(padding=" << options.padding() << ", value=" << options.value() - << ")"; + << ')'; } template class ConstantPadImpl<1, ConstantPad1dImpl>; diff --git a/torch/csrc/api/src/nn/modules/pixelshuffle.cpp b/torch/csrc/api/src/nn/modules/pixelshuffle.cpp index b11a99eea4e..bae89d19649 100644 --- a/torch/csrc/api/src/nn/modules/pixelshuffle.cpp +++ b/torch/csrc/api/src/nn/modules/pixelshuffle.cpp @@ -9,7 +9,7 @@ PixelShuffleImpl::PixelShuffleImpl(const PixelShuffleOptions& options_) void PixelShuffleImpl::pretty_print(std::ostream& stream) const { stream << "torch::nn::PixelShuffle(upscale_factor=" - << options.upscale_factor() << ")"; + << options.upscale_factor() << ')'; } void PixelShuffleImpl::reset() {} @@ -23,7 +23,7 @@ PixelUnshuffleImpl::PixelUnshuffleImpl(const PixelUnshuffleOptions& options_) void PixelUnshuffleImpl::pretty_print(std::ostream& stream) const { stream << "torch::nn::PixelUnshuffle(downscale_factor=" - << options.downscale_factor() << ")"; + << options.downscale_factor() << ')'; } void PixelUnshuffleImpl::reset() {} diff --git a/torch/csrc/api/src/nn/modules/pooling.cpp b/torch/csrc/api/src/nn/modules/pooling.cpp index f42cfe6b202..3d6aeb6dffb 100644 --- a/torch/csrc/api/src/nn/modules/pooling.cpp +++ b/torch/csrc/api/src/nn/modules/pooling.cpp @@ -15,10 +15,10 @@ void AvgPoolImpl::reset() {} template void AvgPoolImpl::pretty_print(std::ostream& stream) const { - stream << "torch::nn::AvgPool" << D << "d" + stream << "torch::nn::AvgPool" << D << 'd' << "(kernel_size=" << options.kernel_size() << ", stride=" << options.stride() << ", padding=" << options.padding() - << ")"; + << ')'; } Tensor AvgPool1dImpl::forward(const Tensor& input) { @@ -68,11 +68,11 @@ void MaxPoolImpl::reset() {} template void MaxPoolImpl::pretty_print(std::ostream& stream) const { - stream << std::boolalpha << "torch::nn::MaxPool" << D << "d" + stream << std::boolalpha << "torch::nn::MaxPool" << D << 'd' << "(kernel_size=" << options.kernel_size() << ", stride=" << options.stride() << ", padding=" << options.padding() << ", dilation=" << options.dilation() - << ", ceil_mode=" << options.ceil_mode() << ")"; + << ", ceil_mode=" << options.ceil_mode() << ')'; } Tensor MaxPool1dImpl::forward(const Tensor& input) { @@ -219,10 +219,10 @@ void MaxUnpoolImpl::reset() {} template void MaxUnpoolImpl::pretty_print(std::ostream& stream) const { - stream << std::boolalpha << "torch::nn::MaxUnpool" << D << "d" + stream << std::boolalpha << "torch::nn::MaxUnpool" << D << 'd' << "(kernel_size=" << options.kernel_size() << ", stride=" << options.stride() << ", padding=" << options.padding() - << ")"; + << ')'; } Tensor MaxUnpool1dImpl::forward( @@ -401,7 +401,7 @@ void LPPoolImpl::pretty_print(std::ostream& stream) const { << "norm_type=" << options.norm_type() << ", " << "kernel_size=" << options.kernel_size() << ", " << "stride=" << options.stride() << ", " - << "ceil_mode=" << options.ceil_mode() << ")"; + << "ceil_mode=" << options.ceil_mode() << ')'; } Tensor LPPool1dImpl::forward(const Tensor& input) { diff --git a/torch/csrc/api/src/nn/modules/rnn.cpp b/torch/csrc/api/src/nn/modules/rnn.cpp index be7c5ded2fc..7ee864bc8ea 100644 --- a/torch/csrc/api/src/nn/modules/rnn.cpp +++ b/torch/csrc/api/src/nn/modules/rnn.cpp @@ -374,7 +374,7 @@ void RNNImplBase::pretty_print(std::ostream& stream) const { if (options_base.proj_size() > 0) { stream << ", 
proj_size=" << options_base.proj_size(); } - stream << ")"; + stream << ')'; } template @@ -837,7 +837,7 @@ template void RNNCellImplBase::pretty_print(std::ostream& stream) const { const std::string name = this->name(); const std::string name_without_impl = name.substr(0, name.size() - 4); - stream << name_without_impl << "(" << options_base.input_size() << ", " + stream << name_without_impl << '(' << options_base.input_size() << ", " << options_base.hidden_size(); if (!options_base.bias()) { stream << ", bias=" << std::boolalpha << false; @@ -846,7 +846,7 @@ void RNNCellImplBase::pretty_print(std::ostream& stream) const { if (!nonlinearity_str.empty() && nonlinearity_str != "kTanh") { stream << ", nonlinearity=" << nonlinearity_str; } - stream << ")"; + stream << ')'; } template diff --git a/torch/csrc/api/src/nn/modules/upsampling.cpp b/torch/csrc/api/src/nn/modules/upsampling.cpp index 420ffe5a881..e29f1034fa5 100644 --- a/torch/csrc/api/src/nn/modules/upsampling.cpp +++ b/torch/csrc/api/src/nn/modules/upsampling.cpp @@ -18,7 +18,7 @@ void UpsampleImpl::pretty_print(std::ostream& stream) const { // NOLINTNEXTLINE(bugprone-unchecked-optional-access) stream << "size=" << at::ArrayRef(options.size().value()); } - stream << ", mode=" << enumtype::get_enum_name(options.mode()) << ")"; + stream << ", mode=" << enumtype::get_enum_name(options.mode()) << ')'; } Tensor UpsampleImpl::forward(const Tensor& input) { diff --git a/torch/csrc/autograd/saved_variable.cpp b/torch/csrc/autograd/saved_variable.cpp index 0124a0212bc..55def20af78 100644 --- a/torch/csrc/autograd/saved_variable.cpp +++ b/torch/csrc/autograd/saved_variable.cpp @@ -172,15 +172,15 @@ Variable SavedVariable::unpack(std::shared_ptr saved_for) const { message << "one of the variables needed for gradient computation has been " "modified by an inplace operation: [" - << data_.toString() << " "; + << data_.toString() << ' '; if (data_.is_nested()) { - message << data_._nested_tensor_size() << "]"; + message << data_._nested_tensor_size() << ']'; } else { - message << data_.sizes() << "]"; + message << data_.sizes() << ']'; } if (grad_fn) { message << ", which is output " << output_nr_ << " of " - << grad_fn->name() << ","; + << grad_fn->name() << ','; } message << " is at version " << current_version << "; expected version " << saved_version_ << " instead."; diff --git a/torch/csrc/cuda/Module.cpp b/torch/csrc/cuda/Module.cpp index b14323a47bf..a8ae82b1b66 100644 --- a/torch/csrc/cuda/Module.cpp +++ b/torch/csrc/cuda/Module.cpp @@ -1114,7 +1114,7 @@ static void registerCudaDeviceProperties(PyObject* module) { stream << "_CudaDeviceProperties(name='" << prop.name << "', major=" << prop.major << ", minor=" << prop.minor #if USE_ROCM - << ", gcnArchName='" << prop.gcnArchName << "'" + << ", gcnArchName='" << prop.gcnArchName << '\'' #endif // USE_ROCM << ", total_memory=" << prop.totalGlobalMem / (1024ull * 1024) << "MB, multi_processor_count=" << prop.multiProcessorCount diff --git a/torch/csrc/distributed/c10d/FlightRecorderDetail.hpp b/torch/csrc/distributed/c10d/FlightRecorderDetail.hpp index 88205c17194..28647b8c50f 100644 --- a/torch/csrc/distributed/c10d/FlightRecorderDetail.hpp +++ b/torch/csrc/distributed/c10d/FlightRecorderDetail.hpp @@ -24,8 +24,8 @@ std::string FlightRecorder::Entry::getTraceback() { for (auto idx : c10::irange(s_tb.size())) { auto frame_id = s_tb[idx]; const auto& frame = s_tbs.all_frames.at(frame_id); - oss << "#" << idx << " " << frame.funcname << " from " << frame.filename - << ":" << frame.lineno << 
'\n'; + oss << '#' << idx << ' ' << frame.funcname << " from " << frame.filename + << ':' << frame.lineno << '\n'; } /* Resulted format is like: #0 all_reduce from pytorch/torch/distributed/distributed_c10d.py:2696 diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp index e99d9b0cf85..8ae3bf3b314 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp @@ -2016,7 +2016,7 @@ void ProcessGroupNCCL::HeartbeatMonitor::runLoop() { << pg_->logPrefix() << "ProcessGroupNCCL monitor thread is disabled, but would have terminated the process" << "after attempting to dump debug info, due to " << exitReason - << "."; + << '.'; } } } diff --git a/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp b/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp index 624a8fc11b6..fa40ff15ec7 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp @@ -174,7 +174,7 @@ struct CollectiveFingerPrint { ss << "Detected mismatch between collectives on ranks. Rank " << backend->getRank() << " is running collective: " << *this << ", but Rank " << rank - << " is running collective: " << rank_fingerprint << "."; + << " is running collective: " << rank_fingerprint << '.'; auto diff_result = compute_collective_diff(rank_fingerprint); if (std::get<0>(diff_result)) { ss << std::get<1>(diff_result); diff --git a/torch/csrc/distributed/c10d/UCCTracing.cpp b/torch/csrc/distributed/c10d/UCCTracing.cpp index 66d62d662c2..78fac30d2ab 100644 --- a/torch/csrc/distributed/c10d/UCCTracing.cpp +++ b/torch/csrc/distributed/c10d/UCCTracing.cpp @@ -51,7 +51,7 @@ void ProcessGroupUCCLogger::flushComms(int rank, int world_size) { _outfile.open(trace_filename, std::ofstream::out | std::ofstream::trunc); // flush the traced comms if (_outfile.is_open()) { - _outfile << "[" << c10::Join(",", trace_generator->getCommsTrace()) + _outfile << '[' << c10::Join(",", trace_generator->getCommsTrace()) << "\n]"; _outfile.flush(); _outfile.close(); diff --git a/torch/csrc/distributed/c10d/UCCUtils.cpp b/torch/csrc/distributed/c10d/UCCUtils.cpp index 6794c4eaa59..9e297ad339f 100644 --- a/torch/csrc/distributed/c10d/UCCUtils.cpp +++ b/torch/csrc/distributed/c10d/UCCUtils.cpp @@ -35,7 +35,7 @@ ucc_status_t oob_allgather( *req = coll_info; } catch (std::exception& ex) { LOG(ERROR) << "(oob_allgather) Caught exception in Store Operation .. " - << "[" << ex.what() << "]"; + << '[' << ex.what() << ']'; return UCC_ERR_NO_MESSAGE; } return UCC_OK; @@ -61,7 +61,7 @@ ucc_status_t oob_allgather_test(void* req) { } } catch (std::exception& ex) { LOG(ERROR) << "(oob_allgather) Caught exception in Store Operation .. " - << "[" << ex.what() << "]"; + << '[' << ex.what() << ']'; return UCC_ERR_NO_MESSAGE; } return UCC_OK; @@ -91,7 +91,7 @@ ucc_status_t oob_allgather_free(void* req) { info->getKey(kAllGatherFree + std::to_string(info->rank))); } catch (std::exception& ex) { LOG(ERROR) << "(oob_allgather) Caught exception in Store Operation .. " - << "[" << ex.what() << "]"; + << '[' << ex.what() << ']'; return UCC_ERR_NO_MESSAGE; } return UCC_OK; diff --git a/torch/csrc/distributed/c10d/Utils.hpp b/torch/csrc/distributed/c10d/Utils.hpp index fc9d735401c..25193b54af9 100644 --- a/torch/csrc/distributed/c10d/Utils.hpp +++ b/torch/csrc/distributed/c10d/Utils.hpp @@ -48,14 +48,14 @@ TORCH_API std::vector getTensorShapes( // Turns at::IntArrayRef into "(1, 2, 3, 4)". 
inline std::string toString(at::IntArrayRef l) { std::stringstream ss; - ss << "("; + ss << '('; for (const auto i : c10::irange(l.size())) { if (i > 0) { ss << ", "; } ss << l[i]; } - ss << ")"; + ss << ')'; return ss.str(); } diff --git a/torch/csrc/distributed/c10d/control_plane/WorkerServer.cpp b/torch/csrc/distributed/c10d/control_plane/WorkerServer.cpp index 2f77bb119a9..8bbe8576207 100644 --- a/torch/csrc/distributed/c10d/control_plane/WorkerServer.cpp +++ b/torch/csrc/distributed/c10d/control_plane/WorkerServer.cpp @@ -87,17 +87,17 @@ WorkerServer::WorkerServer(const std::string& hostOrFile, int port) { "/handler/", [](const httplib::Request& req [[maybe_unused]], httplib::Response& res) { std::ostringstream body; - body << "["; + body << '['; bool first = true; for (const auto& name : getHandlerNames()) { if (!first) { - body << ","; + body << ','; } first = false; - body << "\"" << jsonStrEscape(name) << "\""; + body << '"' << jsonStrEscape(name) << '"'; } - body << "]"; + body << ']'; res.set_content(body.str(), "application/json"); }); diff --git a/torch/csrc/distributed/c10d/logger.cpp b/torch/csrc/distributed/c10d/logger.cpp index 170748a6035..c9ef7262f8c 100644 --- a/torch/csrc/distributed/c10d/logger.cpp +++ b/torch/csrc/distributed/c10d/logger.cpp @@ -215,10 +215,10 @@ void Logger::set_construction_data_and_log( ddp_logging_data_->ints_map["rank"]); std::stringstream ddpLoggingDataInfo; for (const auto& intItem : ddp_logging_data_->ints_map) { - ddpLoggingDataInfo << intItem.first << ": " << intItem.second << "\n"; + ddpLoggingDataInfo << intItem.first << ": " << intItem.second << '\n'; } for (const auto& strItem : ddp_logging_data_->strs_map) { - ddpLoggingDataInfo << strItem.first << ": " << strItem.second << "\n"; + ddpLoggingDataInfo << strItem.first << ": " << strItem.second << '\n'; } LOG(INFO) << initInfo << ddpLoggingDataInfo.str(); } diff --git a/torch/csrc/distributed/c10d/reducer.cpp b/torch/csrc/distributed/c10d/reducer.cpp index 10a2251754c..a1c9b4a3039 100644 --- a/torch/csrc/distributed/c10d/reducer.cpp +++ b/torch/csrc/distributed/c10d/reducer.cpp @@ -615,8 +615,8 @@ void Reducer::delay_all_reduce() { param_name != param_names_.end(), "Expected to find parameter name from unused parameters map in debug mode."); // Add the param_name - unused_params_stream << "{" << param_name->second << "," << unused_index - << "}"; + unused_params_stream << '{' << param_name->second << ',' << unused_index + << '}'; } // Each rank prints out all the unused parameters detected diff --git a/torch/csrc/distributed/c10d/symm_mem/CUDASymmetricMemoryUtils.hpp b/torch/csrc/distributed/c10d/symm_mem/CUDASymmetricMemoryUtils.hpp index efec39e9eb7..e246620df31 100644 --- a/torch/csrc/distributed/c10d/symm_mem/CUDASymmetricMemoryUtils.hpp +++ b/torch/csrc/distributed/c10d/symm_mem/CUDASymmetricMemoryUtils.hpp @@ -61,7 +61,7 @@ class StoreExchange { peer_keys.reserve(world_size); for (int r = 0; r < world_size; ++r) { std::ostringstream oss; - oss << store_prefix_ << "/" << seq_id_ << "/" << r; + oss << store_prefix_ << '/' << seq_id_ << '/' << r; peer_keys.push_back(oss.str()); } ++seq_id_; diff --git a/torch/csrc/distributed/c10d/symm_mem/DMAConnectivity.cpp b/torch/csrc/distributed/c10d/symm_mem/DMAConnectivity.cpp index 0d54c389dde..44a19e96dee 100644 --- a/torch/csrc/distributed/c10d/symm_mem/DMAConnectivity.cpp +++ b/torch/csrc/distributed/c10d/symm_mem/DMAConnectivity.cpp @@ -7,7 +7,7 @@ std::string get_detector_key( c10::DeviceType device_type, const std::string& 
connection_type) { std::ostringstream oss; - oss << device_type << "/" << connection_type; + oss << device_type << '/' << connection_type; return oss.str(); } diff --git a/torch/csrc/distributed/c10d/symm_mem/NCCLSymmetricMemory.cu b/torch/csrc/distributed/c10d/symm_mem/NCCLSymmetricMemory.cu index 0eda605fad6..c099e2d72ec 100644 --- a/torch/csrc/distributed/c10d/symm_mem/NCCLSymmetricMemory.cu +++ b/torch/csrc/distributed/c10d/symm_mem/NCCLSymmetricMemory.cu @@ -207,7 +207,7 @@ class NCCLSymmetricMemoryAllocator : public SymmetricMemoryAllocator { auto buffer_size_map = storeExchange.all_gather(group_info.store, group_info.rank, group_info.world_size, it->second->buffer_size); - LOG(INFO) << "[rank " << group_info.rank << "]" + LOG(INFO) << "[rank " << group_info.rank << ']' << "buffer_size_map: " << buffer_size_map; // NCCL window registration api requires all ranks to have the same buffer size // we have this check to make sure all ranks have the same buffer size. diff --git a/torch/csrc/distributed/c10d/symm_mem/NVSHMEMSymmetricMemory.cu b/torch/csrc/distributed/c10d/symm_mem/NVSHMEMSymmetricMemory.cu index 69e75df453f..510f5c4dd1b 100644 --- a/torch/csrc/distributed/c10d/symm_mem/NVSHMEMSymmetricMemory.cu +++ b/torch/csrc/distributed/c10d/symm_mem/NVSHMEMSymmetricMemory.cu @@ -71,7 +71,7 @@ class NVSHMEMPeerAllocInfo : public c10::intrusive_ptr_target { storeExchange.all_gather(store, rank_, world_size_, global_rank); exchanged_n_times++; if (rank_ == 0) { - LOG(INFO) << "[rank " << rank_ << "]" + LOG(INFO) << "[rank " << rank_ << ']' << " rank_to_global_rank: " << group_info.rank_to_global_rank << ", group_name: " << group_name << ", exchanged_n_times: " << exchanged_n_times; diff --git a/torch/csrc/distributed/c10d/symm_mem/intra_node_comm.cpp b/torch/csrc/distributed/c10d/symm_mem/intra_node_comm.cpp index 0d53d100cee..f62577e7018 100644 --- a/torch/csrc/distributed/c10d/symm_mem/intra_node_comm.cpp +++ b/torch/csrc/distributed/c10d/symm_mem/intra_node_comm.cpp @@ -121,7 +121,7 @@ static std::vector storeAllGather( std::vector peerKeys; for (size_t r = 0; r < worldSize; ++r) { std::ostringstream oss; - oss << prefix << "-" << r; + oss << prefix << '-' << r; peerKeys.push_back(oss.str()); } @@ -187,7 +187,7 @@ bool IntraNodeComm::rendezvous() { if (strcmp(info.hostname, peerDevInfos.front().hostname) != 0) { LOG(WARNING) << "Aborting IntraNodeComm::rendezvous because some " "participants are not on the same host (" - << info.hostname << ", " << devInfo.hostname << ")"; + << info.hostname << ", " << devInfo.hostname << ')'; return false; } rankToDeviceIdx.emplace_back(info.deviceIdx); diff --git a/torch/csrc/distributed/c10d/symm_mem/nvshmem_extension.cu b/torch/csrc/distributed/c10d/symm_mem/nvshmem_extension.cu index cb5d40ef418..a7a87e4bd86 100644 --- a/torch/csrc/distributed/c10d/symm_mem/nvshmem_extension.cu +++ b/torch/csrc/distributed/c10d/symm_mem/nvshmem_extension.cu @@ -57,7 +57,7 @@ bool is_nvshmem_available() { // Open the shared library, RTLD_LAZY defers symbol resolution until needed handle = dlopen("libnvshmem_host.so.3", RTLD_LAZY); if (!handle) { - std::cerr << dlerror() << "\n"; + std::cerr << dlerror() << '\n'; is_available = 0; } else { is_available = 1; diff --git a/torch/csrc/distributed/rpc/rpc_agent.cpp b/torch/csrc/distributed/rpc/rpc_agent.cpp index 9eee15bdc4d..a41969ebc12 100644 --- a/torch/csrc/distributed/rpc/rpc_agent.cpp +++ b/torch/csrc/distributed/rpc/rpc_agent.cpp @@ -326,7 +326,7 @@ std::unordered_map RpcAgent::getDebugInfo() { std::ostream& 
operator<<(std::ostream& os, const WorkerInfo& workerInfo) { return os << "WorkerInfo(id=" << workerInfo.id_ - << ", name=" << workerInfo.name_ << ")"; + << ", name=" << workerInfo.name_ << ')'; } } // namespace torch::distributed::rpc diff --git a/torch/csrc/distributed/rpc/rref_impl.cpp b/torch/csrc/distributed/rpc/rref_impl.cpp index ecf3cbd9991..59087eb3e6a 100644 --- a/torch/csrc/distributed/rpc/rref_impl.cpp +++ b/torch/csrc/distributed/rpc/rref_impl.cpp @@ -290,12 +290,12 @@ void OwnerRRef::setError(std::exception_ptr eptr) { std::ostream& operator<<(std::ostream& os, const RRef& rref) { if (rref.isOwner()) { return os << "OwnerRRef(" - << "rref_id=" << rref.rrefId() << ")"; + << "rref_id=" << rref.rrefId() << ')'; } else { return os << "UserRRef(" << "rref_id=" << rref.rrefId() << ", fork_id=" << static_cast(&rref)->forkId() - << ")"; + << ')'; } } diff --git a/torch/csrc/distributed/rpc/types.cpp b/torch/csrc/distributed/rpc/types.cpp index 8a3a18e96a2..1a19fa47082 100644 --- a/torch/csrc/distributed/rpc/types.cpp +++ b/torch/csrc/distributed/rpc/types.cpp @@ -83,7 +83,7 @@ GloballyUniqueId GloballyUniqueId::fromIValue(const at::IValue& ivalue) { std::ostream& operator<<(std::ostream& os, GloballyUniqueId const& globalId) { return os << "GloballyUniqueId(created_on=" << globalId.createdOn_ - << ", local_id=" << globalId.localId_ << ")"; + << ", local_id=" << globalId.localId_ << ')'; } /////////////////////////// SerializedPyObj /////////////////////////// diff --git a/torch/csrc/dynamo/python_compiled_autograd.cpp b/torch/csrc/dynamo/python_compiled_autograd.cpp index 0e70be3e9ff..c24f2cffdd7 100644 --- a/torch/csrc/dynamo/python_compiled_autograd.cpp +++ b/torch/csrc/dynamo/python_compiled_autograd.cpp @@ -434,10 +434,10 @@ struct VerboseLogger : public PythonLogger { } oss << it->key_size; if (std::next(it) != cached_keys.end()) { - oss << ","; + oss << ','; } } - oss << "]"; + oss << ']'; std::string compile_reason = oss.str(); log(PythonLogger::DEBUG, compile_reason); return compile_reason; @@ -454,7 +454,7 @@ struct VerboseLogger : public PythonLogger { } oss << "sizes[" << std::to_string(new_dyn_sizes_idx[new_dyn_sizes_idx.size() - 1]) - << "]"; + << ']'; std::string recompile_reason = oss.str(); log(PythonLogger::DEBUG, recompile_reason); return recompile_reason; diff --git a/torch/csrc/export/upgrader.cpp b/torch/csrc/export/upgrader.cpp index 04da1ab2a2d..ec275593e6f 100644 --- a/torch/csrc/export/upgrader.cpp +++ b/torch/csrc/export/upgrader.cpp @@ -78,7 +78,7 @@ void registerUpgrader( << " and keypath: "; for (size_t i = 0; i < keypath.size(); ++i) { if (i > 0) - error_stream << "."; + error_stream << '.'; error_stream << keypath[i]; } TORCH_CHECK(false, error_stream.str()); diff --git a/torch/csrc/inductor/aoti_eager/kernel_meta_info.cpp b/torch/csrc/inductor/aoti_eager/kernel_meta_info.cpp index 1642ee4beca..25cd32b6b52 100644 --- a/torch/csrc/inductor/aoti_eager/kernel_meta_info.cpp +++ b/torch/csrc/inductor/aoti_eager/kernel_meta_info.cpp @@ -100,12 +100,12 @@ std::ostream& operator<<( stream << "device_: " << tensor_metadata.device_ << '\n'; stream << "sizes_: "; for (const auto& size : tensor_metadata.sizes_) { - stream << size << " "; + stream << size << ' '; } stream << '\n'; stream << "strides_: "; for (const auto& stride : tensor_metadata.strides_) { - stream << stride << " "; + stream << stride << ' '; } stream << "requires_grad_: " << tensor_metadata.requires_grad_ << '\n'; diff --git a/torch/csrc/inductor/aoti_package/model_package_loader.cpp 
b/torch/csrc/inductor/aoti_package/model_package_loader.cpp index 05d7aa04425..93c8f71e84d 100644 --- a/torch/csrc/inductor/aoti_package/model_package_loader.cpp +++ b/torch/csrc/inductor/aoti_package/model_package_loader.cpp @@ -696,7 +696,7 @@ AOTIModelPackageLoader::AOTIModelPackageLoader( } else { LOG(WARNING) << "You are using an outdated version of the pt2 archive which do not have a prefix in front of each filename. Example: \n" - << found_filenames[0] << "\n" + << found_filenames[0] << '\n' << found_filenames[1]; } diff --git a/torch/csrc/inductor/aoti_runtime/model_base.h b/torch/csrc/inductor/aoti_runtime/model_base.h index 19f1dca1b7e..bf8f07edb14 100644 --- a/torch/csrc/inductor/aoti_runtime/model_base.h +++ b/torch/csrc/inductor/aoti_runtime/model_base.h @@ -468,7 +468,7 @@ class AOTInductorModelBase { auto code = cudaEventDestroy(*run_finished_); if (code != cudaSuccess) { std::cerr << "Failed to destroy CUDA event in AOTInductor model: " - << cudaGetErrorString(code) << "\n"; + << cudaGetErrorString(code) << '\n'; } } #endif // USE_CUDA diff --git a/torch/csrc/inductor/aoti_torch/shim_common.cpp b/torch/csrc/inductor/aoti_torch/shim_common.cpp index 2df92210997..d6db06af5f2 100644 --- a/torch/csrc/inductor/aoti_torch/shim_common.cpp +++ b/torch/csrc/inductor/aoti_torch/shim_common.cpp @@ -1261,7 +1261,7 @@ void aoti_torch_print_tensor_handle(AtenTensorHandle self, const char* msg) { at::Tensor* t = tensor_handle_to_tensor_pointer(self); // Display message - std::cout << "["; + std::cout << '['; if (msg) { std::cout << " " << msg; } @@ -1270,7 +1270,7 @@ void aoti_torch_print_tensor_handle(AtenTensorHandle self, const char* msg) { // Print exact tensor values for small size tensors const int64_t numel = t->numel(); if (numel <= AOTI_TORCH_MAX_NUMEL_TO_PRINT) { - std::cout << *t << "\n"; + std::cout << *t << '\n'; } // Print summary stats of the tensor @@ -1316,7 +1316,7 @@ void aoti_torch_print_tensor_handle(AtenTensorHandle self, const char* msg) { std::cout << "[INFO] Aten built-in function `min_all_cuda/max_all_cuda` not implemented for current dtype: " << t->dtype() << ". 
Printing out the whole value:\n" - << *t << "\n"; + << *t << '\n'; } } } diff --git a/torch/csrc/jit/api/module.cpp b/torch/csrc/jit/api/module.cpp index 53be7504fe2..61c32680c7c 100644 --- a/torch/csrc/jit/api/module.cpp +++ b/torch/csrc/jit/api/module.cpp @@ -615,7 +615,7 @@ std::string Module::dump_to_str( print_method_bodies, print_attr_values, print_param_values)); } ss << " }" << '\n'; - ss << "}" << '\n'; + ss << '}' << '\n'; return ss.str(); } diff --git a/torch/csrc/jit/api/module.h b/torch/csrc/jit/api/module.h index c9b7793c89b..739eaf478f1 100644 --- a/torch/csrc/jit/api/module.h +++ b/torch/csrc/jit/api/module.h @@ -652,7 +652,7 @@ struct NamedPolicy { std::ostringstream ss; for (const auto i : c10::irange(cursors.size())) { if (i > 0) { - ss << "."; + ss << '.'; } ss << nameFragment(cursors[i]); } diff --git a/torch/csrc/jit/backends/backend_detail.cpp b/torch/csrc/jit/backends/backend_detail.cpp index de352f50ab5..2edf832e042 100644 --- a/torch/csrc/jit/backends/backend_detail.cpp +++ b/torch/csrc/jit/backends/backend_detail.cpp @@ -305,8 +305,8 @@ Module codegen_backend_module( TORCH_INTERNAL_ASSERT(default_value.has_value()); std::stringstream def_ss, fwd_ss; // Annotate type of the arg - def_ss << name << ": " << arg.type()->annotation_str(nullptr) << "="; - fwd_ss << name << "=" << name; + def_ss << name << ": " << arg.type()->annotation_str(nullptr) << '='; + fwd_ss << name << '=' << name; default_value->repr( def_ss, [](std::ostream&, const IValue&) -> bool { return false; }); def_inputs.emplace_back(def_ss.str()); @@ -337,18 +337,18 @@ Module codegen_backend_module( if (out_tuple_ty) { auto tuple_elements = out_tuple_ty->elements(); - type_check_ss << tuple_elements[0]->annotation_str() << ")"; + type_check_ss << tuple_elements[0]->annotation_str() << ')'; type_checks.emplace_back(type_check_ss.str()); for (unsigned i = 1, e = tuple_elements.size(); i < e; ++i) { type_check_ss.str(std::string()); type_check_ss.clear(); out_ss << ", _" << i; type_check_ss << "assert isinstance(_" << i << ", " - << tuple_elements[i]->annotation_str() << ")"; + << tuple_elements[i]->annotation_str() << ')'; type_checks.emplace_back(type_check_ss.str()); } } else { - type_check_ss << out_ty->annotation_str() << ")"; + type_check_ss << out_ty->annotation_str() << ')'; type_checks.emplace_back(type_check_ss.str()); } @@ -364,7 +364,7 @@ Module codegen_backend_module( // If the output type is a single element tuple then add an extra comma // to ensure the final output maintains this type. 
if (out_tuple_ty && out_tuple_ty->elements().size() == 1) { - out_ss << ","; + out_ss << ','; } method_te.s("ret", out_ss.str()); diff --git a/torch/csrc/jit/codegen/fuser/tensor_desc.h b/torch/csrc/jit/codegen/fuser/tensor_desc.h index 0c5db65d54a..55cd4008e18 100644 --- a/torch/csrc/jit/codegen/fuser/tensor_desc.h +++ b/torch/csrc/jit/codegen/fuser/tensor_desc.h @@ -88,10 +88,10 @@ struct TORCH_API TensorDesc { }; inline std::ostream& operator<<(std::ostream& out, const TensorDesc& d) { - out << d.scalar_type << "["; + out << d.scalar_type << '['; for (const auto b : d.contiguity) - out << b << ";"; - out << "]"; + out << b << ';'; + out << ']'; return out; } diff --git a/torch/csrc/jit/frontend/concrete_module_type.cpp b/torch/csrc/jit/frontend/concrete_module_type.cpp index 91d41607f9d..1cb5fb225dc 100644 --- a/torch/csrc/jit/frontend/concrete_module_type.cpp +++ b/torch/csrc/jit/frontend/concrete_module_type.cpp @@ -305,39 +305,37 @@ void ConcreteModuleTypeBuilder::addIgnoredAttribute(std::string name) { void ConcreteModuleType::dump() const { std::cout << "ConcreteModuleType for: " - << py::getattr(data_.pyClass_, "__name__") << "\n"; + << py::getattr(data_.pyClass_, "__name__") << '\n'; std::cout << "Constants: \n"; for (const auto& pr : data_.constants_) { - std::cout << "\t" << pr.first << ": " << pr.second << "\n"; + std::cout << '\t' << pr.first << ": " << pr.second << '\n'; } std::cout << "\nAttributes: \n"; for (const auto& pr : data_.attributes_) { - std::cout << "\t" << pr.key() << ": " << pr.value().type_->annotation_str() - << "\n"; + std::cout << '\t' << pr.key() << ": " << pr.value().type_->annotation_str() + << '\n'; } std::cout << "\nSubmodules: \n"; for (const auto& info : data_.modules_) { - std::cout << "\t" << info.name_ << ": " - << info.meta_->getJitType()->annotation_str() << "\n"; + std::cout << '\t' << info.name_ << ": " + << info.meta_->getJitType()->annotation_str() << '\n'; } std::cout << "\nForward Pre-Hooks: \n"; for (const auto& pre_hook_id : data_.forwardPreHooks_) { - std::cout << "\t" - << "pre_hook id: " << pre_hook_id << "\n"; + std::cout << '\t' << "pre_hook id: " << pre_hook_id << '\n'; } std::cout << "\nForward Hooks: \n"; for (const auto& hook_id : data_.forwardHooks_) { - std::cout << "\t" - << "hook id: " << hook_id << "\n"; + std::cout << '\t' << "hook id: " << hook_id << '\n'; } std::cout << "\nOverloads: \n"; for (const auto& pr : data_.overloads_) { - std::cout << "\t" << pr.first << ": " << pr.second << "\n"; + std::cout << '\t' << pr.first << ": " << pr.second << '\n'; } std::string isPoisoned = data_.isPoisoned_ ? 
"true" : "false"; - std::cout << "isPoisoned: " << isPoisoned << "\n"; + std::cout << "isPoisoned: " << isPoisoned << '\n'; if (jitType_) { - std::cout << "jit type: " << jitType_->annotation_str() << "\n"; + std::cout << "jit type: " << jitType_->annotation_str() << '\n'; } } diff --git a/torch/csrc/jit/frontend/error_report.cpp b/torch/csrc/jit/frontend/error_report.cpp index d5a8408e971..47a9343c538 100644 --- a/torch/csrc/jit/frontend/error_report.cpp +++ b/torch/csrc/jit/frontend/error_report.cpp @@ -99,7 +99,7 @@ std::string ErrorReport::current_call_stack() { const char* ErrorReport::what() const noexcept { std::stringstream msg; - msg << "\n" << ss.str(); + msg << '\n' << ss.str(); msg << ":\n"; context.highlight(msg); diff --git a/torch/csrc/jit/frontend/ir_emitter.cpp b/torch/csrc/jit/frontend/ir_emitter.cpp index e7949b0ac4b..fba613b5ea8 100644 --- a/torch/csrc/jit/frontend/ir_emitter.cpp +++ b/torch/csrc/jit/frontend/ir_emitter.cpp @@ -421,7 +421,7 @@ struct Environment { "of another type (torch.jit.annotate(List[T, []]) where T " "is the type of elements in the list for Python 2)"; } - error << "\n" << why_not.str(); + error << '\n' << why_not.str(); throw ErrorReport(error); } } @@ -842,7 +842,7 @@ struct to_ir { throw( ErrorReport(def.decl().params().range()) << "Number of type annotations for" - << " function parameters (" << schema.arguments().size() << ")" + << " function parameters (" << schema.arguments().size() << ')' << " does not match the number of parameters on the function (" << expected_annotation_size << ")!"); } @@ -3452,7 +3452,7 @@ struct to_ir { throw( ErrorReport(apply.inputs()) << "expected an expression of type " << type->repr_str() - << " but found " << expr->type()->repr_str() << "\n" + << " but found " << expr->type()->repr_str() << '\n' << why_not.str()); } @@ -3828,13 +3828,13 @@ struct to_ir { if (!is_key_subtype) { err << "Generated key type " << key_type->repr_str() << " did not match the annotated key type, which was " - << annotated_k_type->repr_str() << "\n"; + << annotated_k_type->repr_str() << '\n'; } if (!is_value_subtype) { err << "Generated value type " << value_type->repr_str() << " did not match the annotated value type, which was " - << annotated_v_type->repr_str() << "\n" + << annotated_v_type->repr_str() << '\n' << ss.str(); } diff --git a/torch/csrc/jit/frontend/parser.cpp b/torch/csrc/jit/frontend/parser.cpp index ef49c15bab2..f56a392cc93 100644 --- a/torch/csrc/jit/frontend/parser.cpp +++ b/torch/csrc/jit/frontend/parser.cpp @@ -23,7 +23,7 @@ Decl mergeTypesFromTypeComment( << type_annotation_decl.params().size() << ") did not match the number of " << (is_method ? 
"method" : "function") << " parameters (" - << expected_num_annotations << ")"; + << expected_num_annotations << ')'; } auto old = decl.params(); auto _new = type_annotation_decl.params(); diff --git a/torch/csrc/jit/frontend/schema_matching.cpp b/torch/csrc/jit/frontend/schema_matching.cpp index 8e0d94b59ac..c3525ac9c8a 100644 --- a/torch/csrc/jit/frontend/schema_matching.cpp +++ b/torch/csrc/jit/frontend/schema_matching.cpp @@ -364,7 +364,7 @@ static std::optional tryMatchSchema( } auto err = [&]() -> std::ostream& { - *failure_messages << "\n" << schema << ":\n"; + *failure_messages << '\n' << schema << ":\n"; return *failure_messages; }; @@ -751,7 +751,7 @@ Value* emitBuiltinCall( } else { error << "Here are some suggestions: \n"; for (const auto& sym : close_symbols) { - error << "\t" << sym.toQualString() << "\n"; + error << '\t' << sym.toQualString() << '\n'; } error << "\nThe original call is"; } diff --git a/torch/csrc/jit/frontend/source_range.cpp b/torch/csrc/jit/frontend/source_range.cpp index 89815d386ac..b9263ad0897 100644 --- a/torch/csrc/jit/frontend/source_range.cpp +++ b/torch/csrc/jit/frontend/source_range.cpp @@ -310,7 +310,7 @@ void SourceRange::print_with_context( if (!funcname.empty()) { out << ", in " << funcname; } - out << "\n"; + out << '\n'; } // print out initial context out << str.substr(begin_context, start() - begin_context); @@ -327,7 +327,7 @@ void SourceRange::print_with_context( auto actual_line = str.substr(line_start, (line_end - line_start) + 1); out << actual_line; if (actual_line.back() != '\n') { - out << "\n"; + out << '\n'; } size_t empty_space = 0; @@ -377,7 +377,7 @@ void SourceRange::print_with_context( auto line_substr = str.substr(line_end, end_context - line_end); out << line_substr; if (!line_substr.empty() && line_substr.back() != '\n') { - out << "\n"; + out << '\n'; } } } diff --git a/torch/csrc/jit/frontend/tree.h b/torch/csrc/jit/frontend/tree.h index 12e75ec41c6..a11f196c5ac 100644 --- a/torch/csrc/jit/frontend/tree.h +++ b/torch/csrc/jit/frontend/tree.h @@ -93,9 +93,9 @@ struct Tree : c10::intrusive_ptr_target { if (trees().size() < expected_subtrees || (!allow_more && trees().size() != expected_subtrees)) { std::stringstream ss; - ss << filename << ":" << lineno << ": expected at least " + ss << filename << ':' << lineno << ": expected at least " << expected_subtrees << " subtrees, but found only " << trees().size() - << "\n"; + << '\n'; range().highlight(ss); TORCH_CHECK(false, ss.str()); } @@ -184,11 +184,11 @@ struct pretty_tree { out << t->stringValue(); break; default: - out << "(" << kindToString(t->kind()); + out << '(' << kindToString(t->kind()); for (const auto& e : t->trees()) { - out << " " << get_flat(e); + out << ' ' << get_flat(e); } - out << ")"; + out << ')'; break; } auto it_ = flat_strings.emplace(t, out.str()); @@ -201,12 +201,12 @@ struct pretty_tree { return; } std::string k = kindToString(t->kind()); - out << "(" << k; + out << '(' << k; for (const auto& e : t->trees()) { - out << "\n" << std::string(indent + 2, ' '); + out << '\n' << std::string(indent + 2, ' '); print(out, e, indent + 2); } - out << ")"; + out << ')'; } }; diff --git a/torch/csrc/jit/ir/alias_analysis.cpp b/torch/csrc/jit/ir/alias_analysis.cpp index f1353bd3103..513258236ac 100644 --- a/torch/csrc/jit/ir/alias_analysis.cpp +++ b/torch/csrc/jit/ir/alias_analysis.cpp @@ -419,14 +419,14 @@ std::string AliasDb::getElementName(const Element* e) const { } else { std::ostringstream ss; if (e->values.size() == 1) { - ss << "%" << 
(*e->values.begin())->debugName(); + ss << '%' << (*e->values.begin())->debugName(); return ss.str(); } - ss << "("; + ss << '('; for (const Value* v : e->values) { - ss << "%" << v->debugName() << ", "; + ss << '%' << v->debugName() << ", "; } - ss << ")"; + ss << ')'; return ss.str(); } } @@ -454,7 +454,7 @@ std::string AliasDb::toString() const { ++ct; ss << getElementName(memoryDAG_->fromIndex(pointedTo)); } - ss << "\n"; + ss << '\n'; } ct = 0; if (!element->containedElements.empty()) { @@ -466,7 +466,7 @@ std::string AliasDb::toString() const { } ++ct; } - ss << "\n"; + ss << '\n'; } } @@ -479,9 +479,9 @@ std::string AliasDb::toString() const { for (const auto value : values) { ss << getElementName(memoryDAG_->fromIndex(value)) << ", "; } - ss << "\n"; + ss << '\n'; } - ss << "\n"; + ss << '\n'; return ss.str(); } @@ -511,7 +511,7 @@ std::string AliasDb::toGraphviz() const { } else { std::ostringstream ss; if (e->values.size() == 1) { - ss << "\"\\%" << (*e->values.begin())->debugName() << "\""; + ss << "\"\\%" << (*e->values.begin())->debugName() << '"'; return ss.str(); } ss << "\"("; @@ -538,7 +538,7 @@ std::string AliasDb::toGraphviz() const { if (!element->pointsTo.empty()) { for (const auto pointedTo : element->pointsTo) { dot << " " << name(element) << " -> " - << name(memoryDAG_->fromIndex(pointedTo)) << "\n"; + << name(memoryDAG_->fromIndex(pointedTo)) << '\n'; } } if (!element->containedElements.empty()) { diff --git a/torch/csrc/jit/ir/ir.cpp b/torch/csrc/jit/ir/ir.cpp index 6febed35405..9b00a703e35 100644 --- a/torch/csrc/jit/ir/ir.cpp +++ b/torch/csrc/jit/ir/ir.cpp @@ -64,7 +64,7 @@ constexpr topo_position_t kMidPoint = 0; constexpr topo_position_t kAppendInterval = 1099511627776ULL /* 2^40 */; void printValueRef(std::ostream& out, const Value* n) { - out << "%" << n->debugName(); + out << '%' << n->debugName(); } bool isNumber(std::string_view str) { @@ -160,7 +160,7 @@ static void printAttribute(std::ostream& out, const at::Tensor& tensor) { // 1-elem tensors are usually boxed scalars, so print them like it if (tensor.numel() == 1) { auto scalar_tensor = tensor.view(std::vector{}).item(); - out << "{"; + out << '{'; if (scalar_tensor.isFloatingPoint()) { out << scalar_tensor.toDouble(); } else if (scalar_tensor.isComplex()) { @@ -168,7 +168,7 @@ static void printAttribute(std::ostream& out, const at::Tensor& tensor) { } else { out << scalar_tensor.toLong(); } - out << "}"; + out << '}'; } else if (tensor.numel() <= max_tensor_display_size) { // TODO: This is awful code. Also it doesn't work on Windows. 
std::ostringstream tensor_ss; @@ -191,7 +191,7 @@ static void printAttribute(std::ostream& out, const IValue& ival) { ss << "[]"; return true; } else if (input.isObject() && !input.type()->is_module()) { - ss << "object(" << &input.toObjectRef() << ")"; + ss << "object(" << &input.toObjectRef() << ')'; return true; } return false; @@ -202,14 +202,14 @@ static void printAttribute(std::ostream& out, const IValue& ival) { static void printTypeList( std::ostream& out, const std::vector& items) { - out << "["; + out << '['; int i = 0; for (auto& item : items) { if (i++ > 0) out << ", "; out << *item; } - out << "]"; + out << ']'; } void Node::printAttrValue(std::ostream& out, const Symbol& name) const { @@ -265,7 +265,7 @@ void Node::printAttrValue(std::ostream& out, const Symbol& name) const { void Node::printAttributes(std::ostream& out, bool ignore_subgraph = false) const { - out << "["; + out << '['; auto names = attributeNames(); int i = 0; for (auto name : names) { @@ -279,11 +279,11 @@ void Node::printAttributes(std::ostream& out, bool ignore_subgraph = false) // don't want to print the qualifier since it should always // be attribute, but you might be able to track down a weird // bug by printing it out. - out << name.toUnqualString() << "="; + out << name.toUnqualString() << '='; printAttrValue(out, name); } - out << "]"; + out << ']'; } SourceRange Node::sourceRange() const { @@ -313,11 +313,11 @@ std::ostream& Node::print( out << " = "; if (kind() == prim::PythonOp) { auto* pyOp = static_cast(this); - out << "^" << pyOp->name(); + out << '^' << pyOp->name(); printAttributes(out, /*ignore_subgraph=*/false); pyOp->writeScalars(out); } else if (hasAttribute(attr::Subgraph) && groups) { - out << kind().toQualString() << "_" << groups->size(); + out << kind().toQualString() << '_' << groups->size(); if (print_attributes && numAttributes() > 1 && kind() != prim::DifferentiableGraph) { printAttributes(out, /*ignore_subgraph=*/true); @@ -330,7 +330,7 @@ std::ostream& Node::print( printAttributes(out); } } - out << "(" << inputs() << ")"; + out << '(' << inputs() << ')'; if (print_scopes) { std::string scName = scopeName(); @@ -350,7 +350,7 @@ std::ostream& Node::print( } if (auto file_line_col = r.file_line_col()) { auto [filename, line, col] = *file_line_col; - out << " # " << filename << ":" << line << ":" << col; + out << " # " << filename << ':' << line << ':' << col; } } @@ -358,11 +358,11 @@ std::ostream& Node::print( return out; } - out << "\n"; + out << '\n'; for (const auto i : c10::irange(blocks().size())) { auto b = blocks()[i]; - indent(out, level + 1) << "block" << i << "(" + indent(out, level + 1) << "block" << i << '(' << const_value_list_with_types(b->inputs()) << "):\n"; for (auto nested : b->nodes()) { @@ -389,7 +389,7 @@ std::ostream& Graph::print(std::ostream& out, bool print_source_locations) out << " return (" << outputs() << ")\n"; size_t i = 0; for (auto fg : groups) { - out << "with " << fg->kind().toQualString() << "_" << i++ << " = " + out << "with " << fg->kind().toQualString() << '_' << i++ << " = " << *fg->g(attr::Subgraph); } out.flush(); @@ -397,7 +397,7 @@ std::ostream& Graph::print(std::ostream& out, bool print_source_locations) /* // Uncomment this to debug all_nodes issues { - out << "\n"; + out << '\n'; out << "all_nodes:\n"; for (auto& n : all_nodes) { printNode(out, const_cast(n), nullptr); @@ -654,7 +654,7 @@ void Graph::lint() const { } void Graph::dump() const { - std::cout << *this << "\n"; + std::cout << *this << '\n'; } void 
Graph::push_scope(const std::string& scope_name) { @@ -888,7 +888,7 @@ Value* Value::setDebugName(const std::string& name) { static std::locale c_locale("C"); ss.imbue(c_locale); #endif - ss << name_base << "." << suffix++; + ss << name_base << '.' << suffix++; replacement_name = ss.str(); } while (names.count(replacement_name) > 0); @@ -1069,7 +1069,7 @@ bool Node::mustBeNone() const { } void Node::dump() const { - std::cout << *this << "\n"; + std::cout << *this << '\n'; } const FunctionSchema& Node::schema() const { @@ -1106,7 +1106,7 @@ const Operator& Node::getOperator() const { auto er = ErrorReport(sourceRange()); er << "Schema not found for node. File a bug report.\n"; - er << "Node: " << *this << "\n"; + er << "Node: " << *this << '\n'; er << "Input types:"; for (const auto i : c10::irange(inputs().size())) { if (i > 0) @@ -1117,13 +1117,13 @@ const Operator& Node::getOperator() const { if (!candidates.empty()) { er << "\ncandidates were:\n"; for (auto& candidate : candidates) { - er << " " << candidate->schema() << "\n"; + er << " " << candidate->schema() << '\n'; } } else { er << "\nno candidates found\n"; } er << "within the graph:\n"; - er << *owningGraph() << "\n"; + er << *owningGraph() << '\n'; throw er; } diff --git a/torch/csrc/jit/jit_log.cpp b/torch/csrc/jit/jit_log.cpp index 745d397f593..83f0e158d31 100644 --- a/torch/csrc/jit/jit_log.cpp +++ b/torch/csrc/jit/jit_log.cpp @@ -159,9 +159,9 @@ std::string jit_log_prefix( int l, const std::string& in_str) { std::stringstream prefix_ss; - prefix_ss << "["; - prefix_ss << level << " "; - prefix_ss << c10::detail::StripBasename(std::string(fn)) << ":"; + prefix_ss << '['; + prefix_ss << level << ' '; + prefix_ss << c10::detail::StripBasename(std::string(fn)) << ':'; prefix_ss << std::setfill('0') << std::setw(3) << l; prefix_ss << "] "; diff --git a/torch/csrc/jit/mobile/debug_info.cpp b/torch/csrc/jit/mobile/debug_info.cpp index 0a410a42fef..be61d1d2ec5 100644 --- a/torch/csrc/jit/mobile/debug_info.cpp +++ b/torch/csrc/jit/mobile/debug_info.cpp @@ -103,7 +103,7 @@ std::pair getStackTraceWithModuleHierarchy( std::get(last_entry); module_info.append(".").append(node_name); std::ostringstream ss; - ss << "Module hierarchy:" << module_info << "\n"; + ss << "Module hierarchy:" << module_info << '\n'; format_stack_trace(ss, stack_entries); return {ss.str(), std::move(module_info)}; } diff --git a/torch/csrc/jit/mobile/import_data.cpp b/torch/csrc/jit/mobile/import_data.cpp index 1bd34e4a823..7071a08daf6 100644 --- a/torch/csrc/jit/mobile/import_data.cpp +++ b/torch/csrc/jit/mobile/import_data.cpp @@ -138,7 +138,7 @@ c10::IValue IValueUnpickler::readArchive( auto read_record = [&](const std::string& name) { std::stringstream ss; - ss << archive_name << "/" << name; + ss << archive_name << '/' << name; return std::get<0>(reader_->getRecord(ss.str())); }; diff --git a/torch/csrc/jit/mobile/interpreter.cpp b/torch/csrc/jit/mobile/interpreter.cpp index b5e67cd83cb..41fc8d49efb 100644 --- a/torch/csrc/jit/mobile/interpreter.cpp +++ b/torch/csrc/jit/mobile/interpreter.cpp @@ -95,11 +95,11 @@ bool InterpreterState::run(Stack& stack) { debug_handle = *handle; } - // std::cout << "RUNNING " << pc << " " << code.instructions_[pc]; + // std::cout << "RUNNING " << pc << ' ' << code.instructions_[pc]; // if (inst.op == OP) { // std::cout << ", " << code.op_names_[inst.X].name; // if (!code.op_names_[inst.X].overload_name.empty()) { - // std::cout << "." << code.op_names_[inst.X].overload_name; + // std::cout << '.' 
<< code.op_names_[inst.X].overload_name; // } // } // std::cout << std::endl; diff --git a/torch/csrc/jit/mobile/model_tracer/tracer.cpp b/torch/csrc/jit/mobile/model_tracer/tracer.cpp index b821e7dfcdc..c6a94dc8a1f 100644 --- a/torch/csrc/jit/mobile/model_tracer/tracer.cpp +++ b/torch/csrc/jit/mobile/model_tracer/tracer.cpp @@ -60,7 +60,7 @@ static void printOpYAML( bool is_used_for_training, bool is_root_operator, bool include_all_overloads) { - out << std::string(indent, ' ') << op_name << ":" << '\n'; + out << std::string(indent, ' ') << op_name << ':' << '\n'; out << std::string(indent + 2, ' ') << "is_used_for_training: " << (is_used_for_training ? "true" : "false") << '\n'; @@ -88,7 +88,7 @@ static void printDTypeYAML( const std::string& kernel_tag_name, const std::set& dtypes) { std::string indent_str = std::string(indent, ' '); - out << indent_str << kernel_tag_name << ":" << '\n'; + out << indent_str << kernel_tag_name << ':' << '\n'; for (auto& dtype : dtypes) { out << indent_str << "- " << dtype << '\n'; } diff --git a/torch/csrc/jit/passes/check_strict_fusion.cpp b/torch/csrc/jit/passes/check_strict_fusion.cpp index 41f60fa3591..731382c3163 100644 --- a/torch/csrc/jit/passes/check_strict_fusion.cpp +++ b/torch/csrc/jit/passes/check_strict_fusion.cpp @@ -73,7 +73,7 @@ static void checkForUnfusedOps(Node* enter_node) { std::stringstream ss; ss << "Found multiple fusions: \n"; for (Node* n : guarding_ifs) { - ss << *n << "\n"; + ss << *n << '\n'; } throw(ErrorReport(enter_node->input()->node()->sourceRange()) << ss.str()); } @@ -100,13 +100,13 @@ static void checkForUnfusedOps(Node* enter_node) { std::stringstream ss; ss << "Found unfused operators: \n"; for (Node* unfused : unfused_nodes_not_used_in_guard) { - ss << "\t"; + ss << '\t'; if (unfused->maybeSchema()) { ss << unfused->schema(); } else { unfused->kind().toDisplayString(); } - ss << "\n"; + ss << '\n'; } throw(ErrorReport(enter_node->input()->node()->sourceRange()) << ss.str()); } diff --git a/torch/csrc/jit/passes/liveness.cpp b/torch/csrc/jit/passes/liveness.cpp index c4a80872d61..138c6fc78f7 100644 --- a/torch/csrc/jit/passes/liveness.cpp +++ b/torch/csrc/jit/passes/liveness.cpp @@ -72,7 +72,7 @@ struct LivenessAnalyzer { std::cout << e.first->outputs()[0]->debugName(); } - std::cout << " " << e.first->kind().toQualString(); + std::cout << ' ' << e.first->kind().toQualString(); std::cout << " = "; dump(e.second); std::cout << '\n'; @@ -83,16 +83,16 @@ struct LivenessAnalyzer { void dump(const std::vector& set) { bool first = true; - std::cout << "["; + std::cout << '['; for (auto el : set) { if (first) { first = false; } else { std::cout << ", "; } - std::cout << el->debugName() << "(" << el->unique() << ")"; + std::cout << el->debugName() << '(' << el->unique() << ')'; } - std::cout << "]"; + std::cout << ']'; } private: diff --git a/torch/csrc/jit/passes/onnx.cpp b/torch/csrc/jit/passes/onnx.cpp index cddae777682..d3231222cb9 100644 --- a/torch/csrc/jit/passes/onnx.cpp +++ b/torch/csrc/jit/passes/onnx.cpp @@ -292,7 +292,7 @@ void NodeToONNX( std::ostringstream ss; ss << "symbolic for " << op_name << " produced an incorrect number of outputs (expected "; - ss << num_old_outputs << ", but got " << outputs.size() << ")"; + ss << num_old_outputs << ", but got " << outputs.size() << ')'; throw std::runtime_error(ss.str()); } // For const node, it does not need params_dict info, so set it to {}. 
diff --git a/torch/csrc/jit/passes/onnx/constant_map.cpp b/torch/csrc/jit/passes/onnx/constant_map.cpp index e4ec14a5a01..902dc5f8924 100644 --- a/torch/csrc/jit/passes/onnx/constant_map.cpp +++ b/torch/csrc/jit/passes/onnx/constant_map.cpp @@ -301,7 +301,7 @@ void ConstantValueMap::PrintMaps() { } } } - ss << " (rank = " << x.second << ")"; + ss << " (rank = " << x.second << ')'; std::cout << "node " << x.first << ": " << ss.str() << '\n'; } std::cout << '\n'; @@ -346,9 +346,9 @@ void ConstantValueMap::PrintMaps() { std::cout << "(node " << x.first << ": "; for (const auto& dim : x.second.dim()) { if (dim.has_dim_param()) { - std::cout << dim.dim_param() << " "; + std::cout << dim.dim_param() << ' '; } else { - std::cout << dim.dim_value() << " "; + std::cout << dim.dim_value() << ' '; } } std::cout << "), "; @@ -361,7 +361,7 @@ void ConstantValueMap::PrintMaps() { std::cout << "SymbolDim Map:" << '\n'; count = 0; for (const auto& x : ConstantValueMap::getInstance().symbolDimMap) { - std::cout << "(" << x.first << ": " << x.second << "), "; + std::cout << '(' << x.first << ": " << x.second << "), "; count++; if (count % 10 == 0) { std::cout << '\n'; @@ -370,7 +370,7 @@ void ConstantValueMap::PrintMaps() { std::cout << "DimSymbol Map:" << '\n'; count = 0; for (const auto& x : ConstantValueMap::getInstance().dimSymbolMap) { - std::cout << "(" << x.first << ": " << x.second << "), "; + std::cout << '(' << x.first << ": " << x.second << "), "; count++; if (count % 10 == 0) { std::cout << '\n'; diff --git a/torch/csrc/jit/passes/onnx/function_extraction.cpp b/torch/csrc/jit/passes/onnx/function_extraction.cpp index 7901b44bb85..fab3110954f 100644 --- a/torch/csrc/jit/passes/onnx/function_extraction.cpp +++ b/torch/csrc/jit/passes/onnx/function_extraction.cpp @@ -250,7 +250,7 @@ void FunctionExtractor::DebugPrintScopeContexts( GRAPH_UPDATE("Children scopes: ", [&]() { std::stringstream ss; for (const auto& child_scope : it.second->children_) { - ss << child_scope->name().toDisplayString() << " "; + ss << child_scope->name().toDisplayString() << ' '; } return ss.str(); }()); diff --git a/torch/csrc/jit/passes/onnx/remove_inplace_ops_for_onnx.cpp b/torch/csrc/jit/passes/onnx/remove_inplace_ops_for_onnx.cpp index a188eb0abd6..48f13499a5f 100644 --- a/torch/csrc/jit/passes/onnx/remove_inplace_ops_for_onnx.cpp +++ b/torch/csrc/jit/passes/onnx/remove_inplace_ops_for_onnx.cpp @@ -440,7 +440,7 @@ std::string InplaceConverter::ValueTracker::toString() const { ss << "Value[" << idx << "]: " << it.first->debugName() << '\n'; ss << " Mapping to "; for (auto v : it.second) { - ss << v->debugName() << " "; + ss << v->debugName() << ' '; } ss << '\n'; idx++; diff --git a/torch/csrc/jit/passes/symbolic_shape_analysis.cpp b/torch/csrc/jit/passes/symbolic_shape_analysis.cpp index 153408b350b..999f8247b7c 100644 --- a/torch/csrc/jit/passes/symbolic_shape_analysis.cpp +++ b/torch/csrc/jit/passes/symbolic_shape_analysis.cpp @@ -152,11 +152,11 @@ static std::ostream& operator<<(std::ostream& os, const ShapeArguments& sa) { return os; } - os << "("; + os << '('; for (const auto i : c10::irange(sa.len())) { os << sa.at(i); } - os << ")"; + os << ')'; return os; } diff --git a/torch/csrc/jit/passes/utils/subgraph_utils.cpp b/torch/csrc/jit/passes/utils/subgraph_utils.cpp index f9fd65f9ce5..f54adbd7223 100644 --- a/torch/csrc/jit/passes/utils/subgraph_utils.cpp +++ b/torch/csrc/jit/passes/utils/subgraph_utils.cpp @@ -612,7 +612,7 @@ static std::string truncateStrWithHash(const std::string& s, size_t maxlen) { (maxlen > 
hash_str.size() + 1) ? (maxlen - hash_str.size() - 1) : maxlen; std::stringstream truncated; truncated << s.substr(0, trunc_len); - truncated << "_" << hash_str; + truncated << '_' << hash_str; return truncated.str(); } @@ -626,7 +626,7 @@ std::string generateNameForGraph( if (!node->kind().is_aten()) { continue; } - graph_name << "_" << node->kind().toUnqualString(); + graph_name << '_' << node->kind().toUnqualString(); } return truncateStrWithHash(graph_name.str(), maxlen); } diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp index f7d855a5157..a7f16a7dc5a 100644 --- a/torch/csrc/jit/python/init.cpp +++ b/torch/csrc/jit/python/init.cpp @@ -1798,7 +1798,7 @@ void initJITBindings(PyObject* module) { << "' with schema(s):\n"; for (const auto& op : sortedOps) { - docstring << " " << op->schema() << "\n"; + docstring << " " << op->schema() << '\n'; } py::list overload_names; diff --git a/torch/csrc/jit/python/python_arg_flatten.h b/torch/csrc/jit/python/python_arg_flatten.h index 232f5b6ea08..472b2577364 100644 --- a/torch/csrc/jit/python/python_arg_flatten.h +++ b/torch/csrc/jit/python/python_arg_flatten.h @@ -79,17 +79,17 @@ static inline std::ostream& operator<<( out << ", "; out << meta.sizes[i]; } - out << "}"; + out << '}'; return out; } static inline std::ostream& operator<<( std::ostream& out, const IODescriptor& desc) { - out << desc.structure << "\n"; - out << " with grad_enabled=" << desc.grad_enabled << "\n"; + out << desc.structure << '\n'; + out << " with grad_enabled=" << desc.grad_enabled << '\n'; for (const auto i : c10::irange(desc.metadata.size())) { - out << " with v" << i << " having type " << desc.metadata[i] << "\n"; + out << " with v" << i << " having type " << desc.metadata[i] << '\n'; } return out; } diff --git a/torch/csrc/jit/python/python_ir.cpp b/torch/csrc/jit/python/python_ir.cpp index 88794ecbf3d..6e5dcde957d 100644 --- a/torch/csrc/jit/python/python_ir.cpp +++ b/torch/csrc/jit/python/python_ir.cpp @@ -61,7 +61,7 @@ static std::ostream& printPyObject(std::ostream& out, const THPObjectPtr& obj) { // tuple.__str__; this doesn't work because Python doesn't allow // monkeypatching methods of built-in types. 
auto pytuple = pyobj.cast(); - out << "("; + out << '('; size_t i = 0; for (const auto& o : pytuple) { if (i > 0) { @@ -72,9 +72,9 @@ static std::ostream& printPyObject(std::ostream& out, const THPObjectPtr& obj) { i++; } if (i == 1) { - out << ","; + out << ','; } - out << ")"; + out << ')'; return out; } else { return out << THPUtils_unpackString(py::str(pyobj).ptr()); @@ -154,14 +154,14 @@ std::optional ConcretePythonOp::autogradFunction() const { } void ConcretePythonOp::writeScalars(std::ostream& out) const { - out << "("; + out << '('; int i = 0; for (auto& scalar : scalar_args) { if (i++ > 0) out << ", "; printPyObject(out, scalar); } - out << ")"; + out << ')'; } void ConcretePythonOp::lint_python() const { @@ -506,7 +506,7 @@ void initPythonIRBindings(PyObject* module_) { "__repr__", [](Value& n) { std::stringstream ss; - ss << n.debugName() << " defined in (" << *n.node() << ")"; + ss << n.debugName() << " defined in (" << *n.node() << ')'; return ss.str(); }) .VS(type) diff --git a/torch/csrc/jit/python/python_tracer.cpp b/torch/csrc/jit/python/python_tracer.cpp index 81da1605fcb..92103119973 100644 --- a/torch/csrc/jit/python/python_tracer.cpp +++ b/torch/csrc/jit/python/python_tracer.cpp @@ -55,8 +55,8 @@ SourceRange getPythonInterpreterSourceRange() { if (src && src->filename()) { auto line = src->starting_line_no() + src->lineno_for_offset(range.start()); - stack_trace << *(src->filename()) << "(" << line - << "): " << entry.filename << "\n"; + stack_trace << *(src->filename()) << '(' << line + << "): " << entry.filename << '\n'; if (!source_filename) { source_filename = *(src->filename()); source_line = line; @@ -218,7 +218,7 @@ void initPythonTracerBindings(PyObject* module) { "__repr__", [](const TracingState& s) { std::ostringstream ss; - ss << ""; + ss << "'; return ss.str(); }) .def( diff --git a/torch/csrc/jit/python/script_init.cpp b/torch/csrc/jit/python/script_init.cpp index b9fbf4d1ec3..ca75e6b9864 100644 --- a/torch/csrc/jit/python/script_init.cpp +++ b/torch/csrc/jit/python/script_init.cpp @@ -497,7 +497,7 @@ static bool ivalue_tags_match(const Module& lhs, const Module& rhs) { if (item.a.isPtrType()) { // uncomment to debug type matching errors // std::cout << "MATCHING " << /*item.a <<*/ "(" << *item.a.type() << ") " - // << item.a.internalToPointer() << " " << /*item.b <<*/ " (" + // << item.a.internalToPointer() << ' ' << /*item.b <<*/ " (" // << *item.b.type() << ") " << item.b.internalToPointer() << // "\n"; @@ -902,7 +902,7 @@ void initJitScriptBindings(PyObject* module) { std::stringstream err; err << "Tried to deepcopy object "; if (auto qualname = class_type->name()) { - err << qualname->qualifiedName() << " "; + err << qualname->qualifiedName() << ' '; } err << "which does not have a __setstate__ method defined!"; throw std::runtime_error(err.str()); @@ -912,7 +912,7 @@ void initJitScriptBindings(PyObject* module) { std::stringstream err; err << "Tried to deepcopy object "; if (auto qualname = self.type()->name()) { - err << qualname->qualifiedName() << " "; + err << qualname->qualifiedName() << ' '; } err << "which does not have a __getstate__ method defined!"; throw std::runtime_error(err.str()); @@ -929,7 +929,7 @@ void initJitScriptBindings(PyObject* module) { std::stringstream err; err << "Tried to serialize object "; if (auto qualname = self.type()->name()) { - err << qualname->qualifiedName() << " "; + err << qualname->qualifiedName() << ' '; } err << "which does not have a __getstate__ method defined!"; throw std::runtime_error(err.str()); @@ 
-966,7 +966,7 @@ void initJitScriptBindings(PyObject* module) { std::stringstream err; err << "Tried to deserialize object "; if (auto qualname = class_type->name()) { - err << qualname->qualifiedName() << " "; + err << qualname->qualifiedName() << ' '; } err << "which does not have a __setstate__ method defined!"; throw std::runtime_error(err.str()); diff --git a/torch/csrc/jit/runtime/argument_spec.cpp b/torch/csrc/jit/runtime/argument_spec.cpp index 0a50a64e5f1..667b94556f2 100644 --- a/torch/csrc/jit/runtime/argument_spec.cpp +++ b/torch/csrc/jit/runtime/argument_spec.cpp @@ -127,7 +127,7 @@ void ArgumentSpecCreator::dump() const { break; } } - std::cout << "\n"; + std::cout << '\n'; } ArgumentSpec ArgumentSpecCreator::create(bool with_grad, const Stack& input) diff --git a/torch/csrc/jit/runtime/argument_spec.h b/torch/csrc/jit/runtime/argument_spec.h index 1b4cf86a196..a7758f1674f 100644 --- a/torch/csrc/jit/runtime/argument_spec.h +++ b/torch/csrc/jit/runtime/argument_spec.h @@ -402,12 +402,12 @@ inline std::ostream& operator<<(std::ostream& out, const ArgumentInfo& info) { } out << "Tensor(device=" << info.device() << ", type=" << toString(info.type()) << ", requires_grad=" << info.requires_grad() << ", dims=" << info.dim() - << ")"; + << ')'; return out; } inline std::ostream& operator<<(std::ostream& out, const ArgumentSpec& spec) { - out << "{"; + out << '{'; for (const auto i : c10::irange(spec.numTensors())) { if (i > 0) out << ", "; @@ -419,7 +419,7 @@ inline std::ostream& operator<<(std::ostream& out, const ArgumentSpec& spec) { out << ", "; out << spec.isPresent(i); } - out << "}"; + out << '}'; return out; } @@ -431,20 +431,20 @@ inline std::ostream& operator<<( } out << "Tensor(device=" << info.device() << ", type=" << toString(info.type()) << ", requires_grad=" << info.requires_grad() - << ", sizes=" << info.sizes() << ", strides=" << info.strides() << ")"; + << ", sizes=" << info.sizes() << ", strides=" << info.strides() << ')'; return out; } inline std::ostream& operator<<( std::ostream& out, const CompleteArgumentSpec& spec) { - out << "{"; + out << '{'; for (const auto i : c10::irange(spec.size())) { if (i > 0) out << ", "; out << spec.at(i); } - out << "}"; + out << '}'; return out; } diff --git a/torch/csrc/jit/runtime/instruction.cpp b/torch/csrc/jit/runtime/instruction.cpp index b591bf7318b..7388b8eac67 100644 --- a/torch/csrc/jit/runtime/instruction.cpp +++ b/torch/csrc/jit/runtime/instruction.cpp @@ -47,10 +47,10 @@ std::ostream& operator<<(std::ostream& out, Instruction inst) { auto nargs = std::strlen(OpInfo(inst.op)); out << inst.op; if (nargs > 0) { - out << " " << inst.X; + out << ' ' << inst.X; } if (nargs > 1) { - out << " " << inst.N; + out << ' ' << inst.N; } return out; } diff --git a/torch/csrc/jit/runtime/interpreter.cpp b/torch/csrc/jit/runtime/interpreter.cpp index 9d4d681f8b3..95b74376d2e 100644 --- a/torch/csrc/jit/runtime/interpreter.cpp +++ b/torch/csrc/jit/runtime/interpreter.cpp @@ -213,7 +213,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target { out << "Stack:\n"; for (const auto& val : stack) { out << val; - out << "\n"; + out << '\n'; } } @@ -929,7 +929,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target { python_class_name ? 
*python_class_name : "RuntimeError"; ss << "The following operation failed in the TorchScript interpreter.\n"; formatStackTrace(ss); - ss << class_name << ": " << msg << "\n"; + ss << class_name << ": " << msg << '\n'; if (future_) { future_->setError(std::make_exception_ptr(Future::FutureError(ss.str()))); } else if (is_jit_exception) { @@ -942,7 +942,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target { not_implemented_error->caller()); } else { if (get_cpp_stacktraces_enabled()) { - ss << e.what() << "\n"; + ss << e.what() << '\n'; } throw std::runtime_error(ss.str()); } @@ -1143,7 +1143,7 @@ std::vector currentModuleHierarchy() { } std::ostream& operator<<(std::ostream& out, const Code& code) { - out << *code.pImpl->graph_ << "\n"; + out << *code.pImpl->graph_ << '\n'; code.pImpl->dump(out); return out; } diff --git a/torch/csrc/jit/runtime/interpreter/code_impl.h b/torch/csrc/jit/runtime/interpreter/code_impl.h index 02e64d19615..3eddaec0dec 100644 --- a/torch/csrc/jit/runtime/interpreter/code_impl.h +++ b/torch/csrc/jit/runtime/interpreter/code_impl.h @@ -866,17 +866,17 @@ struct CodeImpl { } void dump(std::ostream& out, size_t i) const { - out << i << " " << instructions_[i]; + out << i << ' ' << instructions_[i]; if (instructions_[i].op == OP || instructions_[i].op == CALL || instructions_[i].op == OPN) { out << " # " << *instructions_source_[i]; } else { - out << "\n"; + out << '\n'; } } void dump(std::ostream& out) const { - out << *graph_ << "\n"; + out << *graph_ << '\n'; for (const auto i : c10::irange(instructions_.size())) { dump(out, i); } diff --git a/torch/csrc/jit/runtime/register_prim_ops.cpp b/torch/csrc/jit/runtime/register_prim_ops.cpp index 310fe35ffaa..1f168d24e8a 100644 --- a/torch/csrc/jit/runtime/register_prim_ops.cpp +++ b/torch/csrc/jit/runtime/register_prim_ops.cpp @@ -145,7 +145,7 @@ bool isSortableListOfObjectsOrTuples( why_not << "Only list of Tensors, ints, floats, bools, strs, " << "a User Defined Class that defines the __lt__ compare method " << "or Tuples of aforementioned types can be sorted, got list of " - << type->repr_str() << "\n"; + << type->repr_str() << '\n'; return false; } @@ -820,7 +820,7 @@ static const std::vector opGenArgs{ bool first = true; for (const IValue& i : last(stack, num_inputs)) { if (!first) - ss << " "; + ss << ' '; first = false; ss << i; } @@ -2971,10 +2971,10 @@ static const std::vector opGenArgs2{ auto i = pop(stack).toInt(); \ std::stringstream ss; \ if (i < 0) { \ - ss << "-"; \ + ss << '-'; \ i = -i; \ } \ - ss << "0" << prefix << char_op << i; \ + ss << '0' << prefix << char_op << i; \ push(stack, ss.str()); \ }, \ aliasAnalysisFromSchema()) @@ -2991,7 +2991,7 @@ static const std::vector opGenArgs2{ push(stack, "0b0"); } else { if (i < 0) { - ss << "-"; + ss << '-'; i = -i; } std::string str = std::bitset<8 * sizeof(i)>(i).to_string(); diff --git a/torch/csrc/jit/runtime/static/impl.cpp b/torch/csrc/jit/runtime/static/impl.cpp index 0a6e0b3564a..8ad348bb162 100644 --- a/torch/csrc/jit/runtime/static/impl.cpp +++ b/torch/csrc/jit/runtime/static/impl.cpp @@ -145,9 +145,9 @@ std::string dumpValueSet( std::ostringstream oss; oss << set_name << ": {"; for (const auto* val : value_set) { - oss << "%" << val->debugName() << ", "; + oss << '%' << val->debugName() << ", "; } - oss << "}"; + oss << '}'; return oss.str(); } @@ -1521,7 +1521,7 @@ void BlockRunner::benchmark( } else if (results.native_nodes.count(kind)) { std::cout << ", native)" << '\n'; } else { - std::cout << ")" << '\n'; + std::cout << ')' << '\n'; } 
if (generate_ai_pep_output) { @@ -1566,13 +1566,13 @@ void BlockRunner::benchmark( auto unsupported_nodes_count = results.total_nodes_count - results.out_nodes_count - results.native_nodes.size(); std::cout << "Total number of 'out' variant nodes/total number of nodes: " - << results.out_nodes_count << "/" << results.total_nodes_count + << results.out_nodes_count << '/' << results.total_nodes_count << " (" << 100.0 * static_cast(results.out_nodes_count) / static_cast(results.total_nodes_count) << "%)" << '\n'; std::cout << "Total number of nodes not covered by SR/total number of nodes: " - << unsupported_nodes_count << "/" << results.total_nodes_count + << unsupported_nodes_count << '/' << results.total_nodes_count << " (" << 100.0 * static_cast(unsupported_nodes_count) / static_cast(results.total_nodes_count) diff --git a/torch/csrc/jit/serialization/onnx.cpp b/torch/csrc/jit/serialization/onnx.cpp index 82ce2e4e360..499ed582a66 100644 --- a/torch/csrc/jit/serialization/onnx.cpp +++ b/torch/csrc/jit/serialization/onnx.cpp @@ -27,7 +27,7 @@ void dump(const onnx::TensorProto& tensor, std::ostream& stream) { for (const auto i : c10::irange(tensor.dims_size())) { stream << tensor.dims(i) << (i == tensor.dims_size() - 1 ? "" : " "); } - stream << "]"; + stream << ']'; } void dump(const onnx::TensorShapeProto& shape, std::ostream& stream) { @@ -36,7 +36,7 @@ void dump(const onnx::TensorShapeProto& shape, std::ostream& stream) { if (dim.has_dim_value()) { stream << dim.dim_value(); } else { - stream << "?"; + stream << '?'; } stream << (i == shape.dim_size() - 1 ? "" : " "); } @@ -67,7 +67,7 @@ void dump(const onnx::TypeProto_Optional& optional_type, std::ostream& stream) { } else { stream << "None"; } - stream << ">"; + stream << '>'; } void dump(const onnx::TypeProto_Sequence& sequence_type, std::ostream& stream) { @@ -77,7 +77,7 @@ void dump(const onnx::TypeProto_Sequence& sequence_type, std::ostream& stream) { } else { stream << "None"; } - stream << ">"; + stream << '>'; } void dump(const onnx::TypeProto& type, std::ostream& stream) { @@ -95,7 +95,7 @@ void dump(const onnx::TypeProto& type, std::ostream& stream) { void dump(const onnx::ValueInfoProto& value_info, std::ostream& stream) { stream << "{name: \"" << value_info.name() << "\", type:"; dump(value_info.type(), stream); - stream << "}"; + stream << '}'; } void dump(const onnx::GraphProto& graph, std::ostream& stream, size_t indent); @@ -123,36 +123,36 @@ void dump( for (const auto i : c10::irange(attr.floats_size())) { stream << attr.floats(i) << (i == attr.floats_size() - 1 ? "" : " "); } - stream << "]"; + stream << ']'; } else if (attr.ints_size()) { stream << "ints, values: ["; for (const auto i : c10::irange(attr.ints_size())) { stream << attr.ints(i) << (i == attr.ints_size() - 1 ? "" : " "); } - stream << "]"; + stream << ']'; } else if (attr.strings_size()) { stream << "strings, values: ["; for (const auto i : c10::irange(attr.strings_size())) { stream << "'" << attr.strings(i) << "'" << (i == attr.strings_size() - 1 ? 
"" : " "); } - stream << "]"; + stream << ']'; } else if (attr.tensors_size()) { stream << "tensors, values: ["; for (auto& t : attr.tensors()) { dump(t, stream); } - stream << "]"; + stream << ']'; } else if (attr.graphs_size()) { stream << "graphs, values: ["; for (auto& g : attr.graphs()) { dump(g, stream, indent + 1); } - stream << "]"; + stream << ']'; } else { stream << "UNKNOWN"; } - stream << "}"; + stream << '}'; } void dump(const onnx::NodeProto& node, std::ostream& stream, size_t indent) { @@ -174,31 +174,31 @@ void dump(const onnx::NodeProto& node, std::ostream& stream, size_t indent) { void dump(const onnx::GraphProto& graph, std::ostream& stream, size_t indent) { stream << idt(indent) << "GraphProto {" << nlidt(indent + 1) << "name: \"" - << graph.name() << "\"" << nlidt(indent + 1) << "inputs: ["; + << graph.name() << '"' << nlidt(indent + 1) << "inputs: ["; for (const auto i : c10::irange(graph.input_size())) { dump(graph.input(i), stream); stream << (i == graph.input_size() - 1 ? "" : ","); } - stream << "]" << nlidt(indent + 1) << "outputs: ["; + stream << ']' << nlidt(indent + 1) << "outputs: ["; for (const auto i : c10::irange(graph.output_size())) { dump(graph.output(i), stream); stream << (i == graph.output_size() - 1 ? "" : ","); } - stream << "]" << nlidt(indent + 1) << "value_infos: ["; + stream << ']' << nlidt(indent + 1) << "value_infos: ["; for (const auto i : c10::irange(graph.value_info_size())) { dump(graph.value_info(i), stream); stream << (i == graph.value_info_size() - 1 ? "" : ","); } - stream << "]" << nlidt(indent + 1) << "initializers: ["; + stream << ']' << nlidt(indent + 1) << "initializers: ["; for (const auto i : c10::irange(graph.initializer_size())) { dump(graph.initializer(i), stream); stream << (i == graph.initializer_size() - 1 ? 
"" : ","); } - stream << "]" << nlidt(indent + 1) << "nodes: [" << nlidt(indent + 2); + stream << ']' << nlidt(indent + 1) << "nodes: [" << nlidt(indent + 2); for (const auto i : c10::irange(graph.node_size())) { dump(graph.node(i), stream, indent + 2); if (i != graph.node_size() - 1) { - stream << "," << nlidt(indent + 2); + stream << ',' << nlidt(indent + 2); } } stream << nlidt(indent + 1) << "]\n" << idt(indent) << "}\n"; @@ -208,14 +208,14 @@ void dump( const onnx::OperatorSetIdProto& operator_set_id, std::ostream& stream) { stream << "OperatorSetIdProto { domain: " << operator_set_id.domain() - << ", version: " << operator_set_id.version() << "}"; + << ", version: " << operator_set_id.version() << '}'; } void dump(const onnx::ModelProto& model, std::ostream& stream, size_t indent) { stream << idt(indent) << "ModelProto {" << nlidt(indent + 1) - << "producer_name: \"" << model.producer_name() << "\"" - << nlidt(indent + 1) << "domain: \"" << model.domain() << "\"" - << nlidt(indent + 1) << "doc_string: \"" << model.doc_string() << "\""; + << "producer_name: \"" << model.producer_name() << '"' + << nlidt(indent + 1) << "domain: \"" << model.domain() << '"' + << nlidt(indent + 1) << "doc_string: \"" << model.doc_string() << '"'; if (model.has_graph()) { stream << nlidt(indent + 1) << "graph:\n"; dump(model.graph(), stream, indent + 2); diff --git a/torch/csrc/jit/serialization/pickler.cpp b/torch/csrc/jit/serialization/pickler.cpp index 0622dbb5cd9..1d5a2e77931 100644 --- a/torch/csrc/jit/serialization/pickler.cpp +++ b/torch/csrc/jit/serialization/pickler.cpp @@ -130,7 +130,7 @@ void Pickler::pushIValueImpl(const IValue& ivalue) { err << "Cannot serialize custom bound C++ class"; if (memoized_class_types_ && !memoized_class_types_->empty()) { if (auto qualname = memoized_class_types_->back()->name()) { - err << " " << qualname->qualifiedName(); + err << ' ' << qualname->qualifiedName(); } } err << ". 
Please define serialization methods via def_pickle() for " diff --git a/torch/csrc/jit/serialization/python_print.cpp b/torch/csrc/jit/serialization/python_print.cpp index 70e188816fb..bf7e5250487 100644 --- a/torch/csrc/jit/serialization/python_print.cpp +++ b/torch/csrc/jit/serialization/python_print.cpp @@ -381,7 +381,7 @@ struct PythonPrintImpl { static std::string makeValidIdentifier(const std::string& candidate) { std::stringstream ss; if (candidate.empty() || isdigit(candidate[0])) - ss << "_"; + ss << '_'; for (char c : candidate) { if (isupper(c) || islower(c) || isdigit(c) || c == '_') ss << c; @@ -487,11 +487,11 @@ struct PythonPrintImpl { if (isValidIdentifier(val_name)) { stmt << val_name; } else { - stmt << "(" << val_name << ")"; + stmt << '(' << val_name << ')'; } - stmt << "["; + stmt << '['; stmt << useOf(inputs[1]); - stmt << "]"; + stmt << ']'; } void printDict( @@ -534,7 +534,7 @@ struct PythonPrintImpl { body_ << " = "; // or if value is being assigned to something of a union type printValueList(body_, rhs); - body_ << "\n"; + body_ << '\n'; } bool requiresAnnotation(Value* lhs, Value* rhs) { @@ -555,7 +555,7 @@ struct PythonPrintImpl { if (requiresAnnotation(lhs[i], rhs[i])) { body_ << ": " << lhs[i]->type()->annotation_str(type_printer_); } - body_ << " = " << useOf(rhs[i]) << "\n"; + body_ << " = " << useOf(rhs[i]) << '\n'; } } @@ -705,7 +705,7 @@ struct PythonPrintImpl { printValueList(body_, node->outputs()); body_ << " = "; } - body_ << expr << "\n"; + body_ << expr << '\n'; } // Recursively check contained types for any class dependencies @@ -794,7 +794,7 @@ struct PythonPrintImpl { indent(); body_ << "return "; printValueList(body_, node->inputs()); - body_ << "\n"; + body_ << '\n'; } break; case prim::Loop: @@ -814,7 +814,7 @@ struct PythonPrintImpl { if (!node->outputs().empty()) { printValueList(body_, node->outputs(), "", ", = "); } - body_ << useOf(node->input()) << "\n"; + body_ << useOf(node->input()) << '\n'; break; case prim::SetAttr: { const auto obj = node->inputs().at(0); @@ -822,8 +822,8 @@ struct PythonPrintImpl { const auto type = obj->type()->expect(); const auto& attrname = node->s(attr::name); indent(); - body_ << useOf(obj) << "." << attrname << " = " << useOf(newVal) - << "\n"; + body_ << useOf(obj) << '.' 
<< attrname << " = " << useOf(newVal) + << '\n'; } break; case prim::fork: { // the subgraph gets emitted as another function @@ -836,7 +836,7 @@ struct PythonPrintImpl { } printBody(graph->block()); std::stringstream ss; - ss << "fork(" << name << ")"; + ss << "fork(" << name << ')'; printOutputDefinition(node, ss.str()); } break; case prim::awaitable: { @@ -850,7 +850,7 @@ struct PythonPrintImpl { } printBody(graph->block()); std::stringstream ss; - ss << "awaitable(" << name << ")"; + ss << "awaitable(" << name << ')'; printOutputDefinition(node, ss.str()); } break; case prim::Enter: { @@ -884,7 +884,7 @@ struct PythonPrintImpl { auto name = useOf(node->output())->str(); std::shared_ptr graph = node->g(attr::Subgraph); indent(); - body_ << "def " << name << "("; + body_ << "def " << name << '('; assignValuesToTheirUniqueNames(graph->inputs()); for (size_t i = 0; i < graph->inputs().size(); ++i) { Value* v = graph->inputs().at(i); @@ -903,7 +903,7 @@ struct PythonPrintImpl { assignValuesToTheirUniqueNames(out); indent(); body_ << useOf(out) << " : " << out->type()->annotation_str() << " = " - << useOf(container) << "[" << useOf(key) << "]\n"; + << useOf(container) << '[' << useOf(key) << "]\n"; } break; default: auto ss = std::make_shared(&source_range_stack_); @@ -992,7 +992,7 @@ struct PythonPrintImpl { // doing it here ensures we do not have fix up archives later stmt << "torch." << kind.toUnqualString(); } else { - stmt << "ops." << kind.ns().toUnqualString() << "." + stmt << "ops." << kind.ns().toUnqualString() << '.' << kind.toUnqualString(); } } @@ -1011,14 +1011,14 @@ struct PythonPrintImpl { << "If this is a nn.ModuleList, add it to __constants__"); } std::stringstream scalars_stream; - stmt << "^" << value->name(); + stmt << '^' << value->name(); value->writeScalars(scalars_stream); stmt << scalars_stream.str(); printValueList(stmt, node->inputs(), "(", ")"); } break; case prim::Uninitialized: { stmt << "uninitialized(" - << node->output()->type()->annotation_str(type_printer_) << ")"; + << node->output()->type()->annotation_str(type_printer_) << ')'; } break; case prim::Constant: { if (node->outputs().size() == 1 && @@ -1038,7 +1038,7 @@ struct PythonPrintImpl { case aten::IntImplicit: { stmt << "annotate(" << node->output()->type()->annotation_str(type_printer_) << ", " - << useOf(node->input()) << ")"; + << useOf(node->input()) << ')'; } break; case aten::Int: { printValueList(stmt, node->inputs(), "int(", ")"); @@ -1070,12 +1070,12 @@ struct PythonPrintImpl { stmt, node->inputs(), "(", node->inputs().size() == 1 ? 
",)" : ")"); } break; case prim::TupleIndex: { - stmt << "(" << useOf(node->inputs().at(0)) << ")[" - << useOf(node->inputs().at(1)) << "]"; + stmt << '(' << useOf(node->inputs().at(0)) << ")[" + << useOf(node->inputs().at(1)) << ']'; } break; case prim::TupleSlice: { - stmt << "(" << useOf(node->input()) << ")[" << node->i(attr::beg) << ":" - << node->i(attr::end) << "]"; + stmt << '(' << useOf(node->input()) << ")[" << node->i(attr::beg) << ':' + << node->i(attr::end) << ']'; } break; case prim::ListConstruct: { ListTypePtr list_type = node->output()->type()->expect(); @@ -1093,7 +1093,7 @@ struct PythonPrintImpl { stmt << "annotate(" << node->output()->type()->annotation_str(type_printer_) << ", "; printValueList(stmt, node->inputs(), "[", "]"); - stmt << ")"; + stmt << ')'; // Otherwise just print a list } else { printValueList(stmt, node->inputs(), "[", "]"); @@ -1112,7 +1112,7 @@ struct PythonPrintImpl { stmt << "annotate(" << node->output()->type()->annotation_str(type_printer_) << ", "; printDict(stmt, node->inputs()); - stmt << ")"; + stmt << ')'; // Otherwise just print a dict } else { printDict(stmt, node->inputs()); @@ -1121,37 +1121,36 @@ struct PythonPrintImpl { case prim::CreateObject: { const auto classType = node->output()->type()->expect(); stmt << classType->annotation_str(type_printer_) << ".__new__(" - << classType->annotation_str(type_printer_) << ")"; + << classType->annotation_str(type_printer_) << ')'; } break; case prim::GetAttr: { const auto obj = node->inputs().at(0); const auto classType = obj->type()->expect(); const auto& field = node->s(attr::name); if (isValidIdentifier(field)) { - stmt << useOf(obj) << "." << field; + stmt << useOf(obj) << '.' << field; } else { stmt << "getattr(" << useOf(obj) << ", "; std::stringstream field_stream; c10::printQuotedString(field_stream, field); - stmt << field_stream.str() << ")"; + stmt << field_stream.str() << ')'; } } break; case prim::CallFunction: { - stmt << useOf(node->inputs().at(0)) << "("; + stmt << useOf(node->inputs().at(0)) << '('; for (size_t i = 1; i < node->inputs().size(); i++) { stmt << useOf(node->inputs()[i]) << ", "; } - stmt << ")"; + stmt << ')'; } break; case prim::CallMethod: { const auto& self = node->inputs().at(0); const auto& methodName = node->s(attr::name); - stmt << "(" << useOf(self) << ")" - << "." << methodName << "("; + stmt << '(' << useOf(self) << ')' << '.' 
<< methodName << '('; for (size_t i = 1; i < node->inputs().size(); i++) { stmt << useOf(node->inputs()[i]) << ", "; } - stmt << ")"; + stmt << ')'; if (auto selfClass = self->type()->cast()) { deps_table_.add(selfClass); @@ -1169,7 +1168,7 @@ struct PythonPrintImpl { } break; case aten::_unwrap_optional: { printOpName(stmt, node->kind()); - stmt << "("; + stmt << '('; // we cannot recover the type of unwrap_optional(None), // using normal schema matching, so we route around this by rewriting // the call to unwrap_optional(annotated(Optional[T], None)) @@ -1177,11 +1176,11 @@ struct PythonPrintImpl { node->input()->mustBeNone()) { auto input_type = OptionalType::create(node->output()->type()); stmt << "annotate(" << input_type->annotation_str(type_printer_) - << ", " << useOf(node->input()) << ")"; + << ", " << useOf(node->input()) << ')'; } else { stmt << useOf(node->input()); } - stmt << ")"; + stmt << ')'; } break; // unchecked_unwrap_optional is no longer generated by the compiler, // but may end up here if it was first loaded from a old model and @@ -1191,7 +1190,7 @@ struct PythonPrintImpl { case prim::unchecked_cast: { stmt << "unchecked_cast(" << node->output()->type()->annotation_str(type_printer_) << ", " - << useOf(node->input()) << ")"; + << useOf(node->input()) << ')'; } break; case prim::isinstance: { stmt << "isinstance(" << useOf(node->input()) << ", "; @@ -1200,7 +1199,7 @@ struct PythonPrintImpl { stmt << types.at(0)->annotation_str(type_printer_); } else { // check multiple things, e.g. (str, list, int) - stmt << "("; + stmt << '('; bool first = true; for (const TypePtr& typ : types) { if (!first) { @@ -1209,30 +1208,29 @@ struct PythonPrintImpl { stmt << typ->annotation_str(type_printer_); first = false; } - stmt << ")"; + stmt << ')'; } - stmt << ")"; + stmt << ')'; } break; case prim::tolist: { stmt << "annotate(" << node->output()->type()->annotation_str(type_printer_) << ", "; - stmt << useOf(node->input(0)) << ".tolist()" - << ")"; + stmt << useOf(node->input(0)) << ".tolist()" << ')'; } break; case prim::EnumValue: // Note: This CAN NOT be printed as raw operator ops.prim.EnumValue // because its return type depends on type of enum and must be further // resolved, but ops.prim.EnumValue construction does not provide such // functionality. - stmt << "(" << useOf(node->input()) << ").value"; + stmt << '(' << useOf(node->input()) << ").value"; break; case prim::EnumName: - stmt << "(" << useOf(node->input()) << ").name"; + stmt << '(' << useOf(node->input()) << ").name"; break; default: { printOpName(stmt, node->kind()); const FunctionSchema& schema = node->schema(); - stmt << "("; + stmt << '('; // calculate how many args are specified. // see (https://github.com/pytorch/pytorch/pull/56079) for more // details. 
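
Every hunk in this patch follows the same mechanical idiom: when exactly one character is written to a stream, use a char literal instead of a one-character string literal. The const char* overload of operator<< typically has to compute the string's length before writing, while the char overload writes a single character directly. Below is a minimal standalone sketch of the pattern; printBound is a hypothetical helper invented for illustration, loosely modeled on the printer changes in this patch, not code from the tree.

#include <iostream>
#include <sstream>

// Hypothetical helper: prints a (start, end) pair the way the printers in
// this patch do, using char literals for the single-character pieces.
void printBound(std::ostream& os, int start, int end) {
  os << '(' << start << ", " << end << ')';  // was: "(" ... ")"
}

int main() {
  std::ostringstream oss;
  printBound(oss, 0, 16);
  oss << '\n';
  std::cout << oss.str();  // prints "(0, 16)"
}
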
@@ -1257,7 +1255,7 @@ struct PythonPrintImpl { if (i < num_schema_args) { auto arg = schema.arguments().at(i); if (arg.kwarg_only()) { - stmt << arg.name() << "="; + stmt << arg.name() << '='; } } else { // vararg functions like format can have extra arguments @@ -1274,11 +1272,11 @@ struct PythonPrintImpl { // figure out the corresponding input at this index auto input_idx = node->inputs().size() - (num_schema_args - i); if (input_idx < node->inputs().size()) { - stmt << arg.name() << "=" << *useOf(node->inputs().at(input_idx)); + stmt << arg.name() << '=' << *useOf(node->inputs().at(input_idx)); } } } - stmt << ")"; + stmt << ')'; } break; } } @@ -1313,7 +1311,7 @@ struct PythonPrintImpl { const Argument& arg, TaggedStringStream& stmt, const IValue& value) { - stmt << "="; + stmt << '='; // handle broadcasting lists if (arg.type()->kind() == ListType::Kind && (value.isInt() || value.isDouble() || value.isBool())) { @@ -1363,7 +1361,7 @@ struct PythonPrintImpl { WithSourceRange guard(&source_range_stack_, graph.param_node()); indent(); - body_ << "def " << func.name() << "("; + body_ << "def " << func.name() << '('; auto param_it = graph.inputs().begin(); for (const Argument& arg : schema.arguments()) { registerClassDependencies(arg.type()); @@ -1448,14 +1446,14 @@ struct PythonPrintImpl { indent(); body_ << "__parameters__ = ["; for (const auto& param : params) { - body_ << "\"" << param << "\", "; + body_ << '"' << param << "\", "; } body_ << "]\n"; indent(); body_ << "__buffers__ = ["; for (const auto& buffer : buffers) { - body_ << "\"" << buffer << "\", "; + body_ << '"' << buffer << "\", "; } body_ << "]\n"; auto forwardPreHooks = classType->getForwardPreHooks(); @@ -1463,7 +1461,7 @@ struct PythonPrintImpl { indent(); body_ << "__forward_pre_hooks__ = ["; for (const auto& pre_hook : forwardPreHooks) { - body_ << "\"" << pre_hook->name() << "\", "; + body_ << '"' << pre_hook->name() << "\", "; } body_ << "]\n"; } @@ -1473,7 +1471,7 @@ struct PythonPrintImpl { indent(); body_ << "__forward_hooks__ = ["; for (const auto& hook : forwardHooks) { - body_ << "\"" << hook->name() << "\", "; + body_ << '"' << hook->name() << "\", "; } body_ << "]\n"; } @@ -1496,13 +1494,12 @@ struct PythonPrintImpl { } // Print out a direct manipulation of the annotations dict, like: // __annotations__["0"] = SomeType - body_ << "__annotations__[" - << "\"" << name - << "\"] = " << type->annotation_str(type_printer_) << "\n"; + body_ << "__annotations__[" << '"' << name + << "\"] = " << type->annotation_str(type_printer_) << '\n'; } else { // Otherwise: just emit a python 3 attribute annotation, like: // foo : SomeType - body_ << name << " : " << type->annotation_str(type_printer_) << "\n"; + body_ << name << " : " << type->annotation_str(type_printer_) << '\n'; } } @@ -1516,7 +1513,7 @@ struct PythonPrintImpl { << "Final[" << v.type()->annotation_str(type_printer_) << "] = "; auto ss = std::make_shared(&source_range_stack_); printConstant(*ss, v); - body_ << ss->str() << "\n"; + body_ << ss->str() << '\n'; } // TODO fields @@ -1554,7 +1551,7 @@ struct PythonPrintImpl { TORCH_INTERNAL_ASSERT(attr.type()); indent(); body_ << attr.name() << " : " - << attr.type()->annotation_str(type_printer_) << "\n"; + << attr.type()->annotation_str(type_printer_) << '\n'; } } } else if (auto interfaceType = type->cast()) { @@ -1600,7 +1597,7 @@ struct PythonPrintImpl { for (const auto& name_value : enumType->enumNamesValues()) { indent(); body_ << name_value.first << " = " << value_wrapper - << name_value.second << 
value_wrapper << "\n"; + << name_value.second << value_wrapper << '\n'; } } } else { diff --git a/torch/csrc/jit/tensorexpr/block_codegen.cpp b/torch/csrc/jit/tensorexpr/block_codegen.cpp index 24228cdea32..6ec55f998cc 100644 --- a/torch/csrc/jit/tensorexpr/block_codegen.cpp +++ b/torch/csrc/jit/tensorexpr/block_codegen.cpp @@ -132,7 +132,7 @@ void BlockPrinter::visit(const ForPtr& v) { os() << '\n'; emitIndent(); PrintReshapeInfo(buf_writes, true); // print reverse reshape - os() << "}"; + os() << '}'; os() << '\n'; } else if (loop_options.is_gpu_thread_index()) { PrintDMAs(buf_reads); @@ -154,12 +154,12 @@ void BlockPrinter::PrintTensorInfo(const std::unordered_set& bufs) { emitIndent(); auto num_dims = block_analysis_->getMultiDimBuf(buf)->dims().size(); os() << block_analysis_->getInputName(buf) << " = "; - os() << "{"; + os() << '{'; for (unsigned long d = 0; d < num_dims; d++) { - os() << "{" << dim_names[d] << "};"; + os() << '{' << dim_names[d] << "};"; } os() << " elem : " << blockDtypeCppString(buf->dtype()); - os() << "}"; + os() << '}'; } for (auto& buf : bufs) { @@ -168,15 +168,14 @@ void BlockPrinter::PrintTensorInfo(const std::unordered_set& bufs) { emitIndent(); auto num_dims = block_analysis_->getMultiDimBuf(buf)->dims().size(); os() << block_analysis_->getFlatInputName(buf) << " = "; - os() << "{"; - os() << "{" << flat_dim_names[num_dims - 1] << "};"; + os() << '{'; + os() << '{' << flat_dim_names[num_dims - 1] << "};"; os() << " elem : " << blockDtypeCppString(buf->dtype()); - os() << "}" - << " // flattened tensor"; + os() << '}' << " // flattened tensor"; } os() << '\n'; emitIndent(); - os() << "}" << '\n' << '\n'; + os() << '}' << '\n' << '\n'; } void BlockPrinter::PrintArguments(const std::unordered_set& bufs) { @@ -213,7 +212,7 @@ void BlockPrinter::PrintArguments(const std::unordered_set& bufs) { emitIndent(); os() << "var bs_DPE = " << blck_sz << '\n'; emitIndent(); - os() << "}" << '\n' << '\n'; + os() << '}' << '\n' << '\n'; } void BlockPrinter::PrintBufferInfo(const std::unordered_set& bufs) { @@ -230,7 +229,7 @@ void BlockPrinter::PrintBufferInfo(const std::unordered_set& bufs) { } os() << '\n'; emitIndent(); - os() << "}" << '\n' << '\n'; + os() << '}' << '\n' << '\n'; } void BlockPrinter::PrintDistribution(const std::unordered_set& bufs) { @@ -253,14 +252,14 @@ void BlockPrinter::PrintLoop( auto trip = 0; for (auto& buf : bufs) { if (trip > 0) { - os() << ","; + os() << ','; } os() << "{dim : "; os() << block_analysis_->getFlatInputName(buf) << ".dim.0, "; os() << (block_idx ? "block: bs_N}" : "block: bs_DPE}"); ++trip; } - os() << ")"; + os() << ')'; } void BlockPrinter::PrintReshapeInfo( @@ -274,7 +273,7 @@ void BlockPrinter::PrintReshapeInfo( << ", " << (reverse ? 
block_analysis_->getInputName(buf) : block_analysis_->getFlatInputName(buf)) - << ")" << '\n'; + << ')' << '\n'; } } @@ -283,7 +282,7 @@ void BlockPrinter::PrintDMAs(const std::unordered_set& bufs) { emitIndent(); os() << "dma_in("; os() << block_analysis_->getFlatInputName(read); - os() << ")" << '\n'; + os() << ')' << '\n'; } } void BlockPrinter::PrintAdjustBuffers(const std::unordered_set& bufs) { @@ -291,7 +290,7 @@ void BlockPrinter::PrintAdjustBuffers(const std::unordered_set& bufs) { emitIndent(); os() << "adjust_buffer("; os() << block_analysis_->getFlatInputName(read); - os() << ")" << '\n'; + os() << ')' << '\n'; } } @@ -305,14 +304,14 @@ void BlockPrinter::visit(const StorePtr& v) { } void BlockPrinter::visit(const BlockPtr& v) { - os() << "{" << '\n'; + os() << '{' << '\n'; indent_++; for (const StmtPtr& s : v->stmts()) { s->accept(this); } indent_--; emitIndent(); - os() << "}"; + os() << '}'; } std::string BlockCodeGen::GetUniqueFuncName(const std::string& func_prefix) { @@ -341,14 +340,14 @@ void BlockCodeGen::Initialize() { }; std::string func_name = GetUniqueFuncName("func"); - os() << "kernel " << func_name << "("; + os() << "kernel " << func_name << '('; for (auto const& arg : buf_writes) { os() << block_analysis_->getInputName(arg); } for (auto const& arg : buf_reads) { - os() << ";" << block_analysis_->getInputName(arg); + os() << ';' << block_analysis_->getInputName(arg); } - os() << ")"; + os() << ')'; stmt_v->accept(printer_.get()); diff --git a/torch/csrc/jit/tensorexpr/bounds_inference.cpp b/torch/csrc/jit/tensorexpr/bounds_inference.cpp index bbc9d845fa4..034f51f46b8 100644 --- a/torch/csrc/jit/tensorexpr/bounds_inference.cpp +++ b/torch/csrc/jit/tensorexpr/bounds_inference.cpp @@ -128,10 +128,10 @@ void printBoundsInfo(const BoundsInfo& v) { if (!first) { std::cerr << ", "; } - std::cerr << ((b.kind == kLoad) ? "LOAD" : "STORE") << "("; + std::cerr << ((b.kind == kLoad) ? 
"LOAD" : "STORE") << '('; int i = 0; if (b.start.empty()) { - std::cerr << "0"; + std::cerr << '0'; } for (auto& s : b.start) { if (i != 0) { @@ -143,7 +143,7 @@ void printBoundsInfo(const BoundsInfo& v) { std::cerr << "; "; i = 0; if (b.stop.empty()) { - std::cerr << "0"; + std::cerr << '0'; } for (auto& s : b.stop) { if (i != 0) { @@ -152,7 +152,7 @@ void printBoundsInfo(const BoundsInfo& v) { std::cerr << *s; i++; } - std::cerr << ")"; + std::cerr << ')'; first = false; } std::cerr << "]\n"; diff --git a/torch/csrc/jit/tensorexpr/bounds_overlap.cpp b/torch/csrc/jit/tensorexpr/bounds_overlap.cpp index 0c352e3b19f..0c785504efe 100644 --- a/torch/csrc/jit/tensorexpr/bounds_overlap.cpp +++ b/torch/csrc/jit/tensorexpr/bounds_overlap.cpp @@ -35,7 +35,7 @@ static bool mustBeZero(const ExprPtr& e) { } void Bound::print() const { - std::cout << "(" << *start << ", " << *end << ")"; + std::cout << '(' << *start << ", " << *end << ')'; } bool Bound::equals(const Bound& other) const { diff --git a/torch/csrc/jit/tensorexpr/codegen.cpp b/torch/csrc/jit/tensorexpr/codegen.cpp index 41e54869850..b19a8b8964a 100644 --- a/torch/csrc/jit/tensorexpr/codegen.cpp +++ b/torch/csrc/jit/tensorexpr/codegen.cpp @@ -41,7 +41,7 @@ RegisterCodeGenList::StmtFactoryMethod RegisterCodeGenList:: oss << entry.first; index++; } - oss << "]"; + oss << ']'; throw std::runtime_error(oss.str()); } return iter->second; diff --git a/torch/csrc/jit/tensorexpr/cpp_codegen.cpp b/torch/csrc/jit/tensorexpr/cpp_codegen.cpp index fa42d48c75e..6b03b939ace 100644 --- a/torch/csrc/jit/tensorexpr/cpp_codegen.cpp +++ b/torch/csrc/jit/tensorexpr/cpp_codegen.cpp @@ -89,28 +89,28 @@ static inline std::enable_if_t, void> visit_mod( std::ostream& os, const ExprPtr& lhs, const ExprPtr& rhs) { - os << "std::fmod(" << *lhs << ", " << *rhs << ")"; + os << "std::fmod(" << *lhs << ", " << *rhs << ')'; } template static inline std:: enable_if_t || std::is_integral_v, void> visit_max(std::ostream& os, const ExprPtr& lhs, const ExprPtr& rhs) { - os << "std::max(" << *lhs << ", " << *rhs << ")"; + os << "std::max(" << *lhs << ", " << *rhs << ')'; } template static inline std:: enable_if_t && !std::is_integral_v, void> visit_max(std::ostream& os, const ExprPtr& lhs, const ExprPtr& rhs) { - os << "(" << *lhs << " < " << *rhs << ") ? " << *rhs << " : " << *lhs; + os << '(' << *lhs << " < " << *rhs << ") ? " << *rhs << " : " << *lhs; } template static inline std:: enable_if_t || std::is_integral_v, void> visit_min(std::ostream& os, const ExprPtr& lhs, const ExprPtr& rhs) { - os << "std::min(" << *lhs << ", " << *rhs << ")"; + os << "std::min(" << *lhs << ", " << *rhs << ')'; } template @@ -176,14 +176,14 @@ void CppPrinter::visit(const MinPtr& v) { } void CppPrinter::visit(const CompareSelectPtr& v) { - os() << "((" << *v->lhs() << " " - << IRPrinter::to_string(v->compare_select_op()) << " " << *v->rhs() - << ") ? " << *v->ret_val1() << " : " << *v->ret_val2() << ")"; + os() << "((" << *v->lhs() << ' ' + << IRPrinter::to_string(v->compare_select_op()) << ' ' << *v->rhs() + << ") ? " << *v->ret_val1() << " : " << *v->ret_val2() << ')'; } void CppPrinter::visit(const IfThenElsePtr& v) { os() << "((" << *v->condition() << ") ? 
" << *v->true_value() << " : " - << *v->false_value() << ")"; + << *v->false_value() << ')'; } void CppPrinter::visit(const AllocatePtr& v) { @@ -211,7 +211,7 @@ void CppPrinter::visit(const FreePtr& v) { void CppPrinter::visit(const LoadPtr& v) { auto flat_idx = flatten_index(v->buf()->dims(), v->indices(), v->buf()->strides()); - os() << *v->base_handle() << "[" << *flat_idx << "]"; + os() << *v->base_handle() << '[' << *flat_idx << ']'; } void CppPrinter::visit(const StorePtr& v) { @@ -221,19 +221,19 @@ void CppPrinter::visit(const StorePtr& v) { for (int lane = 0; lane < lanes; lane++) { lane_ = lane; emitIndent(); - os() << *v->base_handle() << "[" << *flat_idx << "] = " << *v->value() - << ";" << '\n'; + os() << *v->base_handle() << '[' << *flat_idx << "] = " << *v->value() + << ';' << '\n'; } } void CppPrinter::visit(const CastPtr& v) { os() << "static_cast<" << v->dtype().ToCppString() << ">(" << *v->src_value() - << ")"; + << ')'; } void CppPrinter::visit(const BitCastPtr& v) { os() << "std::bitcast<" << v->src_value()->dtype().ToCppString() << ", " - << v->dtype().ToCppString() << ">(" << *v->src_value() << ")"; + << v->dtype().ToCppString() << ">(" << *v->src_value() << ')'; } void CppPrinter::visit(const IntrinsicsPtr& v) { @@ -241,14 +241,14 @@ void CppPrinter::visit(const IntrinsicsPtr& v) { throw std::runtime_error("kRand and kSigmoid are not supported"); } - os() << "std::" << v->func_name() << "("; + os() << "std::" << v->func_name() << '('; for (size_t i = 0; i < v->nparams(); i++) { if (i > 0) { os() << ", "; } os() << *v->param(i); } - os() << ")"; + os() << ')'; } void CppPrinter::visit(const ExternalCallPtr& v) { @@ -272,7 +272,7 @@ void CppPrinter::visit(const ExternalCallPtr& v) { }; emitIndent(); - os() << "{" << '\n'; + os() << '{' << '\n'; indent_++; emitIndent(); @@ -315,9 +315,9 @@ void CppPrinter::visit(const ExternalCallPtr& v) { os() << "};" << '\n'; emitIndent(); - os() << v->func_name() << "(" << '\n'; + os() << v->func_name() << '(' << '\n'; emitIndent(); - os() << " " << bufs.size() << "," << '\n'; + os() << " " << bufs.size() << ',' << '\n'; emitIndent(); os() << " buf_ptrs," << '\n'; emitIndent(); @@ -327,20 +327,20 @@ void CppPrinter::visit(const ExternalCallPtr& v) { emitIndent(); os() << " buf_dtypes," << '\n'; emitIndent(); - os() << " " << v->args().size() << "," << '\n'; + os() << " " << v->args().size() << ',' << '\n'; emitIndent(); os() << " extra_args);" << '\n'; indent_--; emitIndent(); - os() << "}" << '\n'; + os() << '}' << '\n'; } void CppPrinter::visit(const LetPtr& v) { if (v->var()->dtype().lanes() == 1) { emitIndent(); - os() << v->var()->dtype().ToCppString() << " " << *v->var() << " = " - << *v->value() << ";" << '\n'; + os() << v->var()->dtype().ToCppString() << ' ' << *v->var() << " = " + << *v->value() << ';' << '\n'; } else { vector_vars_[v->var()] = v->value(); } @@ -370,7 +370,7 @@ void CppCodeGen::init() { apply_visitor(var_name_rewriter_.get()); printer_->printPrologue(); - os() << "void " << kernel_func_name() << "("; + os() << "void " << kernel_func_name() << '('; const std::vector buffer_args = this->buffer_args(); for (size_t i = 0; i < buffer_args.size(); i++) { if (i > 0) { @@ -381,7 +381,7 @@ void CppCodeGen::init() { Dtype dtype = buffer_arg.dtype(); os() << dtype.ToCppString() << (buffer_arg.isVar() ? 
" " : "* ") << *var; } - os() << ")"; + os() << ')'; stmt()->accept(printer_.get()); os() << '\n'; } diff --git a/torch/csrc/jit/tensorexpr/cuda_codegen.cpp b/torch/csrc/jit/tensorexpr/cuda_codegen.cpp index 6131b55883d..264e01d65db 100644 --- a/torch/csrc/jit/tensorexpr/cuda_codegen.cpp +++ b/torch/csrc/jit/tensorexpr/cuda_codegen.cpp @@ -195,8 +195,8 @@ void CudaPrinter::print_flat_alloc(const AllocatePtr& alloc) { throw std::runtime_error("Only integer dimensions are supported for now"); } } - os() << dtypeToCppString(alloc->dtype()) << " " << (*alloc->buffer_var()) - << "[" << flat_size << "];" << '\n'; + os() << dtypeToCppString(alloc->dtype()) << ' ' << (*alloc->buffer_var()) + << '[' << flat_size << "];" << '\n'; } void CudaPrinter::visit(const AllocatePtr& v) { @@ -234,9 +234,9 @@ void CudaPrinter::visit(const CastPtr& v) { : v->src_value()->dtype().scalar_type() == ScalarType::BFloat16 ? "__bfloat162float" : ("(" + dtypeToCppString(v->dtype()) + ")"); - os() << castFn << "("; + os() << castFn << '('; v->src_value()->accept(this); - os() << ")"; + os() << ')'; } void CudaPrinter::visit(const IntrinsicsPtr& v) { @@ -265,14 +265,14 @@ void CudaPrinter::visit(const IntrinsicsPtr& v) { func_name = "isnan"; } - os() << func_name << "("; + os() << func_name << '('; for (const auto i : c10::irange(v->nparams())) { if (i > 0) { os() << ", "; } os() << *v->param(i); } - os() << ")"; + os() << ')'; } void CudaPrinter::visit(const ExternalCallPtr& v) { @@ -293,15 +293,15 @@ void CudaPrinter::visit(const LoadPtr& v) { v->dtype().scalar_type() == ScalarType::Half || v->dtype().scalar_type() == ScalarType::BFloat16) { // There's no __ldg overload for bool or half. - os() << *v->base_handle() << "[" << *v->flat_index() << "]"; + os() << *v->base_handle() << '[' << *v->flat_index() << ']'; return; } if (cuda_analysis_->is_buf_store_target(v->buf())) { // Cuda __ldg can only be applied on read-only buffers. - os() << *v->base_handle() << "[" << *v->flat_index() << "]"; + os() << *v->base_handle() << '[' << *v->flat_index() << ']'; return; } - os() << "__ldg(" << *v->base_handle() << " + " << *v->flat_index() << ")"; + os() << "__ldg(" << *v->base_handle() << " + " << *v->flat_index() << ')'; } // TODO: maybe this should be a more shared location? 
@@ -412,9 +412,9 @@ void CudaPrinter::visit(const StorePtr& v) { if (v->indices().empty()) { os() << *v->base_handle() << " = "; } else { - os() << *v->base_handle() << "[" << *v->flat_index() << "] = "; + os() << *v->base_handle() << '[' << *v->flat_index() << "] = "; } - os() << *v->value() << ";"; + os() << *v->value() << ';'; os() << '\n'; } @@ -422,10 +422,10 @@ void CudaPrinter::visit(const AtomicAddPtr& v) { emitIndent(); if (cuda_analysis_->thread_local_bufs().count(v->base_handle()) > 0) { // atomicAdd only works on global and shared memory - os() << *v->base_handle() << "[" << *v->flat_index() - << "] += " << *v->value() << ";"; + os() << *v->base_handle() << '[' << *v->flat_index() + << "] += " << *v->value() << ';'; } else { - os() << "atomicAdd(&" << *v->base_handle() << "[" << *v->flat_index() << "]" + os() << "atomicAdd(&" << *v->base_handle() << '[' << *v->flat_index() << ']' << ", " << *v->value() << ");"; } os() << '\n'; @@ -438,9 +438,9 @@ void CudaPrinter::visit(const MaxPtr& v) { os() << "maximum("; } v->lhs()->accept(this); - os() << ","; + os() << ','; v->rhs()->accept(this); - os() << ")"; + os() << ')'; } void CudaPrinter::visit(const MinPtr& v) { @@ -450,9 +450,9 @@ void CudaPrinter::visit(const MinPtr& v) { os() << "minimum("; } v->lhs()->accept(this); - os() << ","; + os() << ','; v->rhs()->accept(this); - os() << ")"; + os() << ')'; } void CudaPrinter::visit(const IfThenElsePtr& v) { @@ -462,11 +462,11 @@ void CudaPrinter::visit(const IfThenElsePtr& v) { v->true_value()->accept(this); os() << " : "; v->false_value()->accept(this); - os() << ")"; + os() << ')'; } void CudaPrinter::visit(const BlockPtr& v) { - os() << "{" << '\n'; + os() << '{' << '\n'; indent_++; for (const StmtPtr& s : v->stmts()) { @@ -475,15 +475,15 @@ void CudaPrinter::visit(const BlockPtr& v) { indent_--; emitIndent(); - os() << "}"; + os() << '}'; } void CudaPrinter::visit(const LetPtr& v) { emitIndent(); os() << dtypeToCppString(v->var()->dtype()); - os() << " " << *v->var() << " = "; + os() << ' ' << *v->var() << " = "; v->value()->accept(this); - os() << ";" << '\n'; + os() << ';' << '\n'; } class PrioritizeLoad : public IRMutator { @@ -911,7 +911,7 @@ void CudaCodeGen::Initialize() { // https://clang.llvm.org/docs/AttributeReference.html#amdgpu-flat-work-group-size os() << "__attribute__((amdgpu_flat_work_group_size(1, 1024)))" << std::endl; #endif - os() << "void " << func_name << "("; + os() << "void " << func_name << '('; const std::vector buffer_args = this->buffer_args(); for (size_t i = 0; i < buffer_args.size(); i++) { if (i > 0) { @@ -932,7 +932,7 @@ void CudaCodeGen::Initialize() { rand_seed = alloc("rand_seed", kInt); rand_offset = alloc("rand_offset", kInt); std::string uint64_str = "unsigned long long"; - os() << ", " << uint64_str << " " << *rand_seed << ", " << uint64_str << " " + os() << ", " << uint64_str << ' ' << *rand_seed << ", " << uint64_str << ' ' << *rand_offset; } os() << ") {"; @@ -942,7 +942,7 @@ void CudaCodeGen::Initialize() { VarPtr idx = alloc("idx", kInt); os() << "int " << *idx << " = blockIdx.x*blockDim.x + threadIdx.x;" << '\n'; VarPtr rand_func = printer_->rand_func(); - os() << "Philox " << *rand_func << "(" << *rand_seed << ", " << *idx << ", " + os() << "Philox " << *rand_func << '(' << *rand_seed << ", " << *idx << ", " << *rand_offset << ");" << '\n'; os() << '\n'; } @@ -969,7 +969,7 @@ void CudaCodeGen::Initialize() { stmt_v->accept(printer_.get()); os() << '\n'; - os() << "}"; + os() << '}'; // Check that all block extents had been set. 
const std::vector& gpu_block_extents = diff --git a/torch/csrc/jit/tensorexpr/ir_printer.cpp b/torch/csrc/jit/tensorexpr/ir_printer.cpp index 9b2ecd0e115..31b7866a73d 100644 --- a/torch/csrc/jit/tensorexpr/ir_printer.cpp +++ b/torch/csrc/jit/tensorexpr/ir_printer.cpp @@ -71,21 +71,21 @@ static void visitBinaryOp( int rhs_prec = getPrecedence(v->rhs()->expr_type()); if (lhs_prec >= self_prec) { - os << "("; + os << '('; } v->lhs()->accept(printer); if (lhs_prec >= self_prec) { - os << ")"; + os << ')'; } - os << " " << op_str << " "; + os << ' ' << op_str << ' '; if (rhs_prec >= self_prec) { - os << "("; + os << '('; } v->rhs()->accept(printer); if (rhs_prec >= self_prec) { - os << ")"; + os << ')'; } } @@ -129,7 +129,7 @@ void IRPrinter::visit(const ModPtr& v) { if (v->dtype().is_integral()) { visitBinaryOp(v, "%", this); } else if (v->dtype().is_floating_point()) { - os() << "mod(" << *v->lhs() << ", " << *v->rhs() << ")"; + os() << "mod(" << *v->lhs() << ", " << *v->rhs() << ')'; } else { throw std::runtime_error("invalid dtype: " + std::to_string(v->dtype())); } @@ -140,7 +140,7 @@ void IRPrinter::visit(const MaxPtr& v) { v->lhs()->accept(this); os() << ", "; v->rhs()->accept(this); - os() << ", " << (unsigned int)v->propagate_nans() << ")"; + os() << ", " << (unsigned int)v->propagate_nans() << ')'; } void IRPrinter::visit(const MinPtr& v) { @@ -148,7 +148,7 @@ void IRPrinter::visit(const MinPtr& v) { v->lhs()->accept(this); os() << ", "; v->rhs()->accept(this); - os() << ", " << (unsigned int)v->propagate_nans() << ")"; + os() << ", " << (unsigned int)v->propagate_nans() << ')'; } void IRPrinter::visit(const CompareSelectPtr& v) { @@ -158,32 +158,32 @@ void IRPrinter::visit(const CompareSelectPtr& v) { int rhs_prec = getPrecedence(v->rhs()->expr_type()); if (lhs_prec >= self_prec) { - os() << "("; + os() << '('; } v->lhs()->accept(this); if (lhs_prec >= self_prec) { - os() << ")"; + os() << ')'; } os() << to_string(cmp_op); if (rhs_prec >= self_prec) { - os() << "("; + os() << '('; } v->rhs()->accept(this); if (rhs_prec >= self_prec) { - os() << ")"; + os() << ')'; } os() << " ? 
"; auto withParens = [&](const ExprPtr& e) { auto prec = getPrecedence(e->expr_type()); if (prec >= self_prec) { - os() << "("; + os() << '('; } e->accept(this); if (prec >= self_prec) { - os() << ")"; + os() << ')'; } }; withParens(v->ret_val1()); @@ -237,16 +237,16 @@ AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_PRINT_VISIT) void IRPrinter::visit(const CastPtr& v) { auto dtype = v->dtype(); - os() << dtypeToCppString(dtype) << "("; + os() << dtypeToCppString(dtype) << '('; v->src_value()->accept(this); - os() << ")"; + os() << ')'; } void IRPrinter::visit(const BitCastPtr& v) { auto dtype = v->dtype(); os() << "BitCast<" << dtype.ToCppString() << ">("; v->src_value()->accept(this); - os() << ")"; + os() << ')'; } void IRPrinter::visit(const VarPtr& v) { @@ -273,7 +273,7 @@ void IRPrinter::visit(const BufPtr& v) { } s->accept(this); } - os() << "]"; + os() << ']'; os() << ", strides=["; i = 0; for (const ExprPtr& s : v->strides()) { @@ -282,14 +282,14 @@ void IRPrinter::visit(const BufPtr& v) { } s->accept(this); } - os() << "]"; + os() << ']'; - os() << ")"; + os() << ')'; } void IRPrinter::visit(const RampPtr& v) { os() << "Ramp(" << *v->base() << ", " << *v->stride() << ", " << v->lanes() - << ")"; + << ')'; } void IRPrinter::visit(const LoadPtr& v) { @@ -297,7 +297,7 @@ void IRPrinter::visit(const LoadPtr& v) { if (v->indices().empty()) { os() << *v->base_handle(); } else { - os() << *v->base_handle() << "["; + os() << *v->base_handle() << '['; size_t i = 0; for (const ExprPtr& ind : v->indices()) { if (i++) { @@ -306,40 +306,40 @@ void IRPrinter::visit(const LoadPtr& v) { ind->accept(this); } if (v->indices().empty()) { - os() << "0"; + os() << '0'; } - os() << "]"; + os() << ']'; } } void IRPrinter::visit(const BroadcastPtr& v) { - os() << "Broadcast(" << *v->value() << ", " << v->lanes() << ")"; + os() << "Broadcast(" << *v->value() << ", " << v->lanes() << ')'; } void IRPrinter::visit(const IfThenElsePtr& v) { os() << "IfThenElse(" << *v->condition() << ", " << *v->true_value() << ", " - << *v->false_value() << ")"; + << *v->false_value() << ')'; } void IRPrinter::visit(const IntrinsicsPtr& v) { - os() << v->func_name() << "("; + os() << v->func_name() << '('; for (const auto i : c10::irange(v->nparams())) { if (i > 0) { os() << ", "; } os() << *v->param(i); } - os() << ")"; + os() << ')'; } void IRPrinter::visit(const TermPtr& v) { os() << "Term("; v->scalar()->accept(this); for (const auto& t : v->variables()) { - os() << ","; + os() << ','; t->accept(this); } - os() << ")"; + os() << ')'; } void IRPrinter::visit(const PolynomialPtr& v) { @@ -357,7 +357,7 @@ void IRPrinter::visit(const PolynomialPtr& v) { os() << " + "; } v->scalar()->accept(this); - os() << ")"; + os() << ')'; } void IRPrinter::visit(const RoundOffPtr& v) { @@ -365,7 +365,7 @@ void IRPrinter::visit(const RoundOffPtr& v) { v->lhs()->accept(this); os() << ", "; v->rhs()->accept(this); - os() << ")"; + os() << ')'; } void IRPrinter::visit(const MaxTermPtr& v) { @@ -380,7 +380,7 @@ void IRPrinter::visit(const MaxTermPtr& v) { os() << ", "; } } - os() << ")"; + os() << ')'; } void IRPrinter::visit(const MinTermPtr& v) { @@ -395,7 +395,7 @@ void IRPrinter::visit(const MinTermPtr& v) { os() << ", "; } } - os() << ")"; + os() << ')'; } void IRPrinter::visit(const ReduceOpPtr& v) { @@ -423,11 +423,11 @@ void IRPrinter::visit(const ReduceOpPtr& v) { void IRPrinter::visit(const StorePtr& v) { // TODO: handle the mask if (v->indices().empty()) { - os() << *v->base_handle() << " = " << *v->value() << ";"; + os() << 
*v->base_handle() << " = " << *v->value() << ';'; return; } - os() << *v->base_handle() << "["; + os() << *v->base_handle() << '['; size_t i = 0; for (const ExprPtr& ind : v->indices()) { if (i++) { @@ -436,15 +436,15 @@ void IRPrinter::visit(const StorePtr& v) { ind->accept(this); } if (v->indices().empty()) { - os() << "0"; + os() << '0'; } - os() << "] = " << *v->value() << ";"; + os() << "] = " << *v->value() << ';'; } void IRPrinter::visit(const ForPtr& v) { VarPtr var = v->var(); VarHandle vv(var); - os() << "for (" << dtypeToCppString(var->dtype()) << " " << vv << " = " + os() << "for (" << dtypeToCppString(var->dtype()) << ' ' << vv << " = " << ExprHandle(v->start()) << "; " << vv << " < " << ExprHandle(v->stop()) << "; " << vv << "++) "; std::string loop_options_str = v->loop_options().ToString(); @@ -464,11 +464,11 @@ void IRPrinter::visit(const BlockPtr& v) { for (const StmtPtr& s : *v) { emitIndent(); - os() << *s << "\n"; + os() << *s << '\n'; } indent_--; emitIndent(); - os() << "}"; + os() << '}'; } void IRPrinter::visit(const AllocatePtr& v) { @@ -482,7 +482,7 @@ void IRPrinter::visit(const AllocatePtr& v) { } os() << *dims[i]; } - os() << "]"; + os() << ']'; } void IRPrinter::visit(const FreePtr& v) { @@ -503,13 +503,13 @@ void IRPrinter::visit(const FreeExtPtr& v) { } void IRPrinter::visit(const PlacementAllocatePtr& v) { - os() << "Alias(" << *v->buf()->base_handle() << "," + os() << "Alias(" << *v->buf()->base_handle() << ',' << *v->buf_to_reuse()->base_handle() << ");"; } void IRPrinter::visit(const LetPtr& v) { - os() << dtypeToCppString(v->var()->dtype()) << " " << *v->var(); - os() << " = " << *v->value() << ";"; + os() << dtypeToCppString(v->var()->dtype()) << ' ' << *v->var(); + os() << " = " << *v->value() << ';'; } void IRPrinter::visit(const CondPtr& v) { @@ -530,7 +530,7 @@ void IRPrinter::visit(const CondPtr& v) { } void IRPrinter::visit(const AtomicAddPtr& v) { - os() << "atomicAdd(&" << *v->base_handle() << "["; + os() << "atomicAdd(&" << *v->base_handle() << '['; size_t i = 0; for (const ExprPtr& ind : v->indices()) { if (i++) { @@ -539,7 +539,7 @@ void IRPrinter::visit(const AtomicAddPtr& v) { ind->accept(this); } if (v->indices().empty()) { - os() << "0"; + os() << '0'; } os() << "], " << *v->value() << ");"; } @@ -549,7 +549,7 @@ void IRPrinter::visit(const SyncThreadsPtr& v) { } void IRPrinter::visit(const ExternalCallPtr& v) { - os() << *v->buf() << " = " << v->func_name() << "("; + os() << *v->buf() << " = " << v->func_name() << '('; os() << "buf_args={"; int i = 0; @@ -580,7 +580,7 @@ void IRPrinter::visit(const ExternalCallWithAllocPtr& v) { os() << *buf_out_arg; } - os() << " := " << v->func_name() << "("; + os() << " := " << v->func_name() << '('; os() << "buf_args={"; i = 0; @@ -657,7 +657,7 @@ void print(const ExprPtr& expr) { } else { std::cout << "(null expr)"; } - std::cout << "\n"; + std::cout << '\n'; } void print(const StmtPtr& stmt) { @@ -691,14 +691,14 @@ std::string to_string(const StmtPtr& stmt) { std::string to_string(const Tensor& t) { std::ostringstream oss; // TODO: move this to Buf printer - oss << "Tensor " << t.buf()->name_hint() << "["; + oss << "Tensor " << t.buf()->name_hint() << '['; for (const auto i : c10::irange(t.buf()->ndim())) { if (i != 0) { oss << ", "; } oss << *t.buf()->dim(i); } - oss << "]:\n" << *t.stmt() << "\n"; + oss << "]:\n" << *t.stmt() << '\n'; return oss.str(); } } // namespace std diff --git a/torch/csrc/jit/tensorexpr/loopnest.cpp b/torch/csrc/jit/tensorexpr/loopnest.cpp index 7f0888666d3..cca7efcd0ad 
100644 --- a/torch/csrc/jit/tensorexpr/loopnest.cpp +++ b/torch/csrc/jit/tensorexpr/loopnest.cpp @@ -131,9 +131,9 @@ std::string sanitizeName(const std::string& input_name) { } else { if (i == 0) { // Don't start names with underscore - sanitized_name << "v"; + sanitized_name << 'v'; } - sanitized_name << "_"; + sanitized_name << '_'; } } return sanitized_name.str(); diff --git a/torch/csrc/jit/tensorexpr/loopnest_randomization.cpp b/torch/csrc/jit/tensorexpr/loopnest_randomization.cpp index 46a09314fb7..3dda98ff0fa 100644 --- a/torch/csrc/jit/tensorexpr/loopnest_randomization.cpp +++ b/torch/csrc/jit/tensorexpr/loopnest_randomization.cpp @@ -733,7 +733,7 @@ void loopnestRandomization(int64_t seed, LoopNest& l) { } } catch (...) { std::cout << "EXCEPTION THROWN!\n"; - std::cout << "SEED: " << seed << "\n"; + std::cout << "SEED: " << seed << '\n'; throw std::runtime_error("Random test failed"); } message = "End of transformations;\n"; diff --git a/torch/csrc/jit/tensorexpr/mem_dependency_checker.cpp b/torch/csrc/jit/tensorexpr/mem_dependency_checker.cpp index 73a1c6a4a2d..bbd43f0fa8a 100644 --- a/torch/csrc/jit/tensorexpr/mem_dependency_checker.cpp +++ b/torch/csrc/jit/tensorexpr/mem_dependency_checker.cpp @@ -151,7 +151,7 @@ bool AccessInfo::isWrite() const { } void AccessInfo::print() const { - std::cout << id_ << ". " << AccessToString(type_) << ": " << *var_ << "["; + std::cout << id_ << ". " << AccessToString(type_) << ": " << *var_ << '['; if (!bounds_.empty()) { for (size_t i = 0; i < bounds_.size() - 1; ++i) { bounds_[i].print(); @@ -161,30 +161,30 @@ void AccessInfo::print() const { size_t i = bounds_.size() - 1; bounds_[i].print(); } - std::cout << "]"; + std::cout << ']'; if (!dependencies_.empty()) { std::cout << " - depends on: "; for (auto& pair : dependencies_) { - std::cout << pair.second->id() << " "; + std::cout << pair.second->id() << ' '; } } if (!dependents_.empty()) { std::cout << " - dependents: "; for (auto& pair : dependents_) { - std::cout << pair.second.lock()->id() << " "; + std::cout << pair.second.lock()->id() << ' '; } } - std::cout << "\n"; + std::cout << '\n'; } void AccessInfo::dumpDOT(std::ostream& os) const { if (type_ == AccessType::Input || type_ == AccessType::Output || type_ == AccessType::Alloc) { - os << "n" << id_ << " [\n"; - os << "label = \"" << AccessToString(type_) << "\\n " << *var_ << "["; + os << 'n' << id_ << " [\n"; + os << "label = \"" << AccessToString(type_) << "\\n " << *var_ << '['; if (!bounds_.empty()) { for (size_t i = 0; i < bounds_.size() - 1; ++i) { os << *IRSimplifier::simplify( @@ -203,17 +203,17 @@ void AccessInfo::dumpDOT(std::ostream& os) const { os << "\tshape = \"house\"\n"; } } else { - os << "n" << id_ << " [\n"; + os << 'n' << id_ << " [\n"; os << "label = \"" << AccessToString(type_) << " (#" << id_ << ")\\n"; os << "buf : " << *var_ << "\\n"; os << "bounds : ["; if (!bounds_.empty()) { for (size_t i = 0; i < bounds_.size() - 1; ++i) { - os << "(" << *bounds_[i].start << ", " << *bounds_[i].end << "), "; + os << '(' << *bounds_[i].start << ", " << *bounds_[i].end << "), "; } size_t i = bounds_.size() - 1; - os << "(" << *bounds_[i].start << ", " << *bounds_[i].end << ")]"; + os << '(' << *bounds_[i].start << ", " << *bounds_[i].end << ")]"; } os << "\"\n"; os << "\tshape = \"box\"\n"; @@ -228,8 +228,8 @@ void AccessInfo::dumpDOT(std::ostream& os) const { } os << "]\n"; for (auto& pair : dependencies_) { - os << "n" << pair.second->id() << " -> " - << "n" << id_ << " [color=\"" << edgeColour << "\"]\n"; + os << 'n' << 
pair.second->id() << " -> " << 'n' << id_ << " [color=\"" + << edgeColour << "\"]\n"; } } diff --git a/torch/csrc/jit/tensorexpr/registerizer.cpp b/torch/csrc/jit/tensorexpr/registerizer.cpp index 37f79d52923..9ad44e31a38 100644 --- a/torch/csrc/jit/tensorexpr/registerizer.cpp +++ b/torch/csrc/jit/tensorexpr/registerizer.cpp @@ -131,17 +131,17 @@ std::shared_ptr AccessInfo::cloneWithHiddenInfo( } void AccessInfo::print() const { - std::cout << "Access: " << *buf_ << "{"; + std::cout << "Access: " << *buf_ << '{'; for (const auto& i : indices_) { - std::cout << *i << " "; + std::cout << *i << ' '; } std::cout << "} stores: " << stores_.size() << " (" << *store_cost_ << ") -"; - std::cout << " loads: " << loads_.size() << " (" << *load_cost_ << ")"; + std::cout << " loads: " << loads_.size() << " (" << *load_cost_ << ')'; if (conditionId_) { std::cout << " cond: " << conditionId_; } - std::cout << "\n"; + std::cout << '\n'; } // Scope diff --git a/torch/csrc/jit/tensorexpr/types.cpp b/torch/csrc/jit/tensorexpr/types.cpp index 0ee8fd4a956..f3a62fa3740 100644 --- a/torch/csrc/jit/tensorexpr/types.cpp +++ b/torch/csrc/jit/tensorexpr/types.cpp @@ -57,7 +57,7 @@ Dtype ToDtype(ScalarType type) { TORCH_API std::ostream& operator<<(std::ostream& stream, const Dtype& dtype) { stream << dtype.scalar_type_; if (dtype.lanes() > 1) { - stream << "x" << dtype.lanes(); + stream << 'x' << dtype.lanes(); ; } return stream; diff --git a/torch/csrc/jit/testing/file_check.cpp b/torch/csrc/jit/testing/file_check.cpp index aeac1233e4d..fb1280400a8 100644 --- a/torch/csrc/jit/testing/file_check.cpp +++ b/torch/csrc/jit/testing/file_check.cpp @@ -116,7 +116,7 @@ size_t assertFind( const std::string& sub, const Check& check) { return assertFind(search_range, sub, [&](std::ostream& out) { - out << "From " << check << "\n"; + out << "From " << check << '\n'; }); } @@ -156,7 +156,7 @@ size_t assertFindRegex( const std::string& sub, const Check& check) { return assertFindRegex(search_range, sub, [&](std::ostream& out) { - out << "From " << check << "\n"; + out << "From " << check << '\n'; }); } @@ -182,7 +182,7 @@ void assertNotFind( c10::printQuotedString(ss, sub); ss << " but found it\n"; found_range.highlight(ss); - ss << "From " << check << "\n"; + ss << "From " << check << '\n'; throw std::runtime_error(ss.str()); } } @@ -543,7 +543,7 @@ FileCheck::FileCheck() : fcImpl(new FileCheckImpl()) {} std::ostream& operator<<(std::ostream& out, const FileCheckImpl& fc) { out << "FileCheck checks:\n"; for (const Check& c : fc.checks) { - out << "\t" << c << "\n"; + out << '\t' << c << '\n'; } return out; } diff --git a/torch/csrc/lazy/core/debug_util.cpp b/torch/csrc/lazy/core/debug_util.cpp index 3cc35c6d0cf..2eb448e75f6 100644 --- a/torch/csrc/lazy/core/debug_util.cpp +++ b/torch/csrc/lazy/core/debug_util.cpp @@ -77,7 +77,7 @@ std::string GetFirstUserFrameInPython() { auto& loc = frames[i - 1]; if (loc.file.find("site-packages") == std::string::npos) { std::stringstream ss; - ss << loc.file << " " << loc.function << " " << loc.line; + ss << loc.file << ' ' << loc.function << ' ' << loc.line; return ss.str(); } } @@ -120,7 +120,7 @@ std::string DebugUtil::GetTensorsGraphInfo( std::vector frames = GetPythonFramesFunction()(); ss << "Python Stacktrace:\n"; for (auto& location : frames) { - ss << " " << location.function << " (" << location.file << ":" + ss << " " << location.function << " (" << location.file << ':' << location.line << ")\n"; } ss << "\nHashes: ("; @@ -160,7 +160,7 @@ void DebugUtil::SaveTensorsGraphInfo( 
std::string info = GetTensorsGraphInfo(tensors, indices, format); std::lock_guard guard(lock); std::ofstream graph_file(save_file, std::ios_base::app); - graph_file << "[" << name << "]\n" << info << "\n"; + graph_file << '[' << name << "]\n" << info << '\n'; } } diff --git a/torch/csrc/lazy/core/ir.cpp b/torch/csrc/lazy/core/ir.cpp index 709b5b028b2..3cd25d2f5e8 100644 --- a/torch/csrc/lazy/core/ir.cpp +++ b/torch/csrc/lazy/core/ir.cpp @@ -143,7 +143,7 @@ const Output& Node::nullable_operand(size_t i) const { std::string Node::ToString() const { std::stringstream ss; - ss << shapes() << " " << op(); + ss << shapes() << ' ' << op(); if (num_outputs() > 1) { ss << ", num_outputs=" << num_outputs(); } diff --git a/torch/csrc/lazy/core/ir_dump_util.cpp b/torch/csrc/lazy/core/ir_dump_util.cpp index 3f33c4fce22..b7f95968245 100644 --- a/torch/csrc/lazy/core/ir_dump_util.cpp +++ b/torch/csrc/lazy/core/ir_dump_util.cpp @@ -137,7 +137,7 @@ std::string GenerateDotNodeLabel( std::stringstream ss; ss << node->op() << "\\n" << node->shape(); for (auto& tag : GetNodeTags(node)) { - ss << "\\n" << tag.name << "="; + ss << "\\n" << tag.name << '='; if (tag.value.size() < kMaxValueSize) { ss << tag.value; } else { @@ -155,27 +155,27 @@ std::string GenerateDotNodeSpec( const Node* node, const std::unordered_map& roots_ids) { std::stringstream ss; - ss << "label=\"" << GenerateDotNodeLabel(node, roots_ids) << "\""; + ss << "label=\"" << GenerateDotNodeLabel(node, roots_ids) << '"'; return ss.str(); } std::string GenerateTextNodeSpec(const Node* node, const NodeIdMap& id_map) { std::stringstream ss; - ss << node->shapes() << " " << node->op() << "("; + ss << node->shapes() << ' ' << node->op() << '('; size_t count = 0; for (auto& output : node->operands()) { if (count > 0) { ss << ", "; } - ss << "%" << id_map.at(output.node); + ss << '%' << id_map.at(output.node); if (output.node->num_outputs() > 1) { - ss << "." << output.index; + ss << '.' 
<< output.index; } ++count; } - ss << ")"; + ss << ')'; for (auto& tag : GetNodeTags(node)) { - ss << ", " << tag.name << "=" << tag.value; + ss << ", " << tag.name << '=' << tag.value; } return ss.str(); } @@ -214,7 +214,7 @@ std::string DumpUtil::PostOrderToDot( if (output.node->num_outputs() > 1) { ss << " [label=\"o=" << output.index << "\"]"; } - ss << "\n"; + ss << '\n'; } } } @@ -242,7 +242,7 @@ std::string DumpUtil::PostOrderToText( ss << ", ROOT=" << *opt_root_id; } ss << ", NodeType=" << typeid(*node).name(); - ss << "\n"; + ss << '\n'; } ss << "}\n"; return ss.str(); diff --git a/torch/csrc/lazy/core/ir_metadata.cpp b/torch/csrc/lazy/core/ir_metadata.cpp index 50aedaca029..5da2860ed6c 100644 --- a/torch/csrc/lazy/core/ir_metadata.cpp +++ b/torch/csrc/lazy/core/ir_metadata.cpp @@ -16,8 +16,8 @@ void EmitShortFrameInfo( } else { ++pos; } - stream << ", location=" << frame.function << "@" << frame.file.substr(pos) - << ":" << frame.line; + stream << ", location=" << frame.function << '@' << frame.file.substr(pos) + << ':' << frame.line; } } @@ -26,7 +26,7 @@ std::ostream& operator<<( const std::vector& frames) { stream << "Frames:\n"; for (auto& location : frames) { - stream << " " << location.function << " (" << location.file << ":" + stream << " " << location.function << " (" << location.file << ':' << location.line << ")\n"; } return stream; diff --git a/torch/csrc/lazy/core/lazy_graph_executor.cpp b/torch/csrc/lazy/core/lazy_graph_executor.cpp index c440357f9e1..413601f70af 100644 --- a/torch/csrc/lazy/core/lazy_graph_executor.cpp +++ b/torch/csrc/lazy/core/lazy_graph_executor.cpp @@ -404,7 +404,7 @@ void LazyGraphExecutor::SyncLiveTensorsGraph( bool wait) { auto tensors = GetLiveTensors(device); VLOG(4) << tensors.size() << " live tensors: devices=(" - << c10::Join(", ", devices) << ")"; + << c10::Join(", ", devices) << ')'; SyncTensorsGraph(&tensors, devices, wait, /*sync_ltc_data=*/true); } diff --git a/torch/csrc/lazy/core/shape_inference.cpp b/torch/csrc/lazy/core/shape_inference.cpp index e7ab494d18e..ada3a2fed16 100644 --- a/torch/csrc/lazy/core/shape_inference.cpp +++ b/torch/csrc/lazy/core/shape_inference.cpp @@ -85,7 +85,7 @@ static std::vector expand_param_if_needed( std::ostringstream ss; ss << "expected " << param_name << " to be a single integer value or a " << "list of " << expected_dim << " values to match the convolution " - << "dimensions, but got " << param_name << "=" << list_param; + << "dimensions, but got " << param_name << '=' << list_param; TORCH_CHECK(false, ss.str()); } else { return list_param.vec(); diff --git a/torch/csrc/lazy/core/trie.cpp b/torch/csrc/lazy/core/trie.cpp index a4a5d6f0c8b..e0e657aae13 100644 --- a/torch/csrc/lazy/core/trie.cpp +++ b/torch/csrc/lazy/core/trie.cpp @@ -19,7 +19,7 @@ void TraverseTrie(TrieNode* node, std::stringstream& ss) { << ", " << node->hit_counter << " hits\"]\n"; } for (auto& successor : node->successors) { - ss << node->unique_id << " -> " << successor->unique_id << "\n"; + ss << node->unique_id << " -> " << successor->unique_id << '\n'; TraverseTrie(successor.get(), ss); } } diff --git a/torch/csrc/monitor/counters.h b/torch/csrc/monitor/counters.h index 65a0f516a58..046c63a78ed 100644 --- a/torch/csrc/monitor/counters.h +++ b/torch/csrc/monitor/counters.h @@ -226,7 +226,7 @@ class Stat { for (auto& kv : stats) { std::stringstream key; key << name_; - key << "."; + key << '.'; key << aggregationName(kv.first); e.data[key.str()] = kv.second; } diff --git a/torch/csrc/profiler/kineto_shim.cpp 
b/torch/csrc/profiler/kineto_shim.cpp index ec9994e15ec..524b84070cb 100644 --- a/torch/csrc/profiler/kineto_shim.cpp +++ b/torch/csrc/profiler/kineto_shim.cpp @@ -201,13 +201,13 @@ class ExperimentalConfigWrapper { for (size_t i = 0; i < num_metrics; i++) { configss << config_.profiler_metrics[i]; if (num_metrics > 1 && i < (num_metrics - 1)) { - configss << ","; + configss << ','; } } configss << "\nCUPTI_PROFILER_ENABLE_PER_KERNEL=" << (config_.profiler_measure_per_kernel ? "true" : "false") - << "\n"; - configss << "CUSTOM_CONFIG=" << config_.custom_profiler_config << "\n"; + << '\n'; + configss << "CUSTOM_CONFIG=" << config_.custom_profiler_config << '\n'; LOG(INFO) << "Generated config = " << configss.str(); libkineto::api().activityProfiler().prepareTrace( @@ -236,8 +236,8 @@ static const std::string setTraceID(const std::string& trace_id) { return ""; } std::stringstream configss; - configss << "REQUEST_TRACE_ID=" << trace_id << "\n"; - configss << "REQUEST_GROUP_TRACE_ID=" << trace_id << "\n"; + configss << "REQUEST_TRACE_ID=" << trace_id << '\n'; + configss << "REQUEST_GROUP_TRACE_ID=" << trace_id << '\n'; return configss.str(); } @@ -249,7 +249,7 @@ static const std::string appendCustomConfig( } std::stringstream configss; configss << config; - configss << "CUSTOM_CONFIG=" << custom_profiler_config << "\n"; + configss << "CUSTOM_CONFIG=" << custom_profiler_config << '\n'; return configss.str(); } #endif diff --git a/torch/csrc/profiler/standalone/execution_trace_observer.cpp b/torch/csrc/profiler/standalone/execution_trace_observer.cpp index 5edc59c893d..29b2b94af44 100644 --- a/torch/csrc/profiler/standalone/execution_trace_observer.cpp +++ b/torch/csrc/profiler/standalone/execution_trace_observer.cpp @@ -279,7 +279,7 @@ static std::ofstream openOutputFile(const std::string& name) { std::ofstream stream; stream.open(name, std::ofstream::out | std::ofstream::trunc); if (!stream) { - LOG(ERROR) << "Failed to open '" << name << "'"; + LOG(ERROR) << "Failed to open '" << name << '\''; } else { VLOG(1) << "PyTorch Execution Trace: writing to " << name; } @@ -754,7 +754,7 @@ static void recordOperatorStart( RecordScope::USER_SCOPE), tid, 0); // fw_tid - ob.out << ","; + ob.out << ','; } } @@ -928,7 +928,7 @@ static void onFunctionExit(const RecordFunction& fn, ObserverContext* ctx_ptr) { fc.kernelFile, fc.get_string_for_tensor_range(), additiona_attrs); - ob->out << ","; + ob->out << ','; } } catch (const std::exception& e) { LOG(WARNING) << "Exception in execution trace observer: [" << fc.name @@ -977,7 +977,7 @@ bool addExecutionTraceObserver(const std::string& output_file_path) { // 5 is the length of ".json" ob.resourceDir.replace(ext_pos, 5, "_resources/"); VLOG(1) << "Execution trace resource directory: " << ob.resourceDir - << "\n"; + << '\n'; } else { LOG(WARNING) << "Execution trace output file does not end with \".json\"."; diff --git a/torch/csrc/profiler/stubs/cuda.cpp b/torch/csrc/profiler/stubs/cuda.cpp index 2b634b0303c..45c288b976a 100644 --- a/torch/csrc/profiler/stubs/cuda.cpp +++ b/torch/csrc/profiler/stubs/cuda.cpp @@ -21,7 +21,7 @@ namespace { static void cudaCheck(cudaError_t result, const char* file, int line) { if (result != cudaSuccess) { std::stringstream ss; - ss << file << ":" << line << ": "; + ss << file << ':' << line << ": "; if (result == cudaErrorInitializationError) { // It is common for users to use DataLoader with multiple workers // and the autograd profiler. Throw a nice error message here. 
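
One detail worth noting when the idiom is applied to quote characters, as in the conversions to '\'' and '"' above: inside a char literal the double-quote character needs no escape while the single quote does (the reverse of string literals), and escape sequences such as \n and \t carry over unchanged. A tiny illustrative sketch, not taken from the tree:

#include <iostream>

int main() {
  std::cout << '\'' << "quoted" << '\'' << '\n'   // "'"  -> '\''  (escape added)
            << '"'  << "quoted" << '"'  << '\n'   // "\"" -> '"'   (escape dropped)
            << '\t' << "indented" << '\n';        // "\t" and "\n" stay escaped
}
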
diff --git a/torch/csrc/profiler/unwind/action.h b/torch/csrc/profiler/unwind/action.h
index 1a8373d9dfe..5a982cfd046 100644
--- a/torch/csrc/profiler/unwind/action.h
+++ b/torch/csrc/profiler/unwind/action.h
@@ -40,16 +40,16 @@ struct Action {
   friend std::ostream& operator<<(std::ostream& out, const Action& self) {
     switch (self.kind) {
       case A_UNDEFINED:
-        out << "u";
+        out << 'u';
         break;
       case A_REG_PLUS_DATA:
-        out << "r" << (int)self.reg << " + " << self.data;
+        out << 'r' << (int)self.reg << " + " << self.data;
         break;
       case A_REG_PLUS_DATA_DEREF:
-        out << "*(r" << (int)self.reg << " + " << self.data << ")";
+        out << "*(r" << (int)self.reg << " + " << self.data << ')';
         break;
       case A_LOAD_CFA_OFFSET:
-        out << "*(cfa + " << self.data << ")";
+        out << "*(cfa + " << self.data << ')';
         break;
     }
     return out;
diff --git a/torch/csrc/profiler/unwind/eh_frame_hdr.h b/torch/csrc/profiler/unwind/eh_frame_hdr.h
index 740f4beb2c8..5884685433b 100644
--- a/torch/csrc/profiler/unwind/eh_frame_hdr.h
+++ b/torch/csrc/profiler/unwind/eh_frame_hdr.h
@@ -81,7 +81,7 @@ struct EHFrameHdr {
   friend std::ostream& operator<<(std::ostream& out, const EHFrameHdr& self) {
     out << "EHFrameHeader(version=" << self.version_
         << ",table_size=" << self.table_size_
-        << ",fde_count=" << self.fde_count_ << ")";
+        << ",fde_count=" << self.fde_count_ << ')';
     return out;
   }
diff --git a/torch/csrc/profiler/unwind/fde.h b/torch/csrc/profiler/unwind/fde.h
index 083578ec391..ffb06b5ab1f 100644
--- a/torch/csrc/profiler/unwind/fde.h
+++ b/torch/csrc/profiler/unwind/fde.h
@@ -17,7 +17,7 @@ struct TableState {
     out << "cfa = " << self.cfa << "; ";
     for (auto r : c10::irange(self.registers.size())) {
       if (self.registers.at(r).kind != A_UNDEFINED) {
-        out << "r" << r << " = " << self.registers.at(r) << "; ";
+        out << 'r' << r << " = " << self.registers.at(r) << "; ";
       }
     }
     return out;
@@ -110,21 +110,21 @@ struct FDE {
     auto previous_pc = current_pc_;
     current_pc_ += amount;
     if (LOG) {
-      (*out_) << (void*)(previous_pc - load_bias_) << "-"
-              << (void*)(current_pc_ - load_bias_) << ": " << state() << "\n";
+      (*out_) << (void*)(previous_pc - load_bias_) << '-'
+              << (void*)(current_pc_ - load_bias_) << ": " << state() << '\n';
     }
   }

   void advance_loc(int64_t amount) {
     if (LOG) {
-      (*out_) << "advance_loc " << amount << "\n";
+      (*out_) << "advance_loc " << amount << '\n';
     }
     advance_raw(amount * code_alignment_factor_);
   }

   void offset(int64_t reg, int64_t offset) {
     if (LOG) {
-      (*out_) << "offset " << reg << " " << offset << "\n";
+      (*out_) << "offset " << reg << ' ' << offset << '\n';
     }
     if (reg > (int64_t)state().registers.size()) {
       if (LOG) {
@@ -138,7 +138,7 @@ struct FDE {

   void restore(int64_t reg) {
     if (LOG) {
-      (*out_) << "restore " << reg << "\n";
+      (*out_) << "restore " << reg << '\n';
     }
     if (reg > (int64_t)state().registers.size()) {
       if (LOG) {
@@ -151,7 +151,7 @@ struct FDE {

   void def_cfa(int64_t reg, int64_t off) {
     if (LOG) {
-      (*out_) << "def_cfa " << reg << " " << off << "\n";
+      (*out_) << "def_cfa " << reg << ' ' << off << '\n';
     }
     last_reg_ = reg;
     last_offset_ = off;
@@ -179,13 +179,13 @@ struct FDE {
   void undefined(int64_t reg) {
     if (LOG) {
-      (*out_) << "undefined " << reg << "\n";
+      (*out_) << "undefined " << reg << '\n';
     }
     state().registers.at(reg) = Action::undefined();
   }

   void register_(int64_t reg, int64_t rhs_reg) {
     if (LOG) {
-      (*out_) << "register " << reg << " " << rhs_reg << "\n";
+      (*out_) << "register " << reg << ' ' << rhs_reg << '\n';
     }
     state().registers.at(reg) =
         Action::regPlusData(static_cast(reg), 0);
@@ -214,7 +214,7 @@ struct FDE {
     if (LOG) {
       // NOLINTNEXTLINE(performance-no-int-to-ptr)
       (*out_) << "readUpTo " << (void*)addr << " for " << library_name_
-              << " at " << (void*)load_bias_ << "\n";
+              << " at " << (void*)load_bias_ << '\n';
     }
     state_stack_.emplace_back();
     current_pc_ = low_pc_;
@@ -245,8 +245,8 @@ struct FDE {
   }

   void dumpAddr2Line() {
-    std::cout << "addr2line -f -e " << library_name_ << " "
-              << (void*)(low_pc_ - load_bias_) << "\n";
+    std::cout << "addr2line -f -e " << library_name_ << ' '
+              << (void*)(low_pc_ - load_bias_) << '\n';
   }

   void readInstruction(Lexer& L) {
diff --git a/torch/csrc/profiler/unwind/unwind.cpp b/torch/csrc/profiler/unwind/unwind.cpp
index 2b30df4e2a6..db7e8a60e4a 100644
--- a/torch/csrc/profiler/unwind/unwind.cpp
+++ b/torch/csrc/profiler/unwind/unwind.cpp
@@ -354,7 +354,7 @@ struct Symbolizer {
     entry.queried.push_back(addr);
     auto libaddress = maybe_library->second - 1;
     // NOLINTNEXTLINE(performance-no-int-to-ptr)
-    entry.comm->out() << (void*)libaddress << "\n";
+    entry.comm->out() << (void*)libaddress << '\n';
     // we need to make sure we don't write more than 64k bytes to
     // a pipe before reading the results. Otherwise the buffer may
     // get filled and block before we read the results.
diff --git a/torch/csrc/profiler/util.cpp b/torch/csrc/profiler/util.cpp
index d266958e2cb..b547bc528da 100644
--- a/torch/csrc/profiler/util.cpp
+++ b/torch/csrc/profiler/util.cpp
@@ -145,7 +145,7 @@ std::vector callstackStr(const std::vector& cs) {
   cs_str.reserve(cs.size());
   for (const auto& entry : cs) {
     std::stringstream loc;
-    loc << entry.filename << "(" << entry.line << "): " << entry.funcname;
+    loc << entry.filename << '(' << entry.line << "): " << entry.funcname;
     cs_str.push_back(loc.str());
   }
   return cs_str;
@@ -310,11 +310,11 @@ std::string ivalueToStr(const c10::IValue& val, bool isString) {
   } else {
     ss.str("");
     if (isString) {
-      ss << "\"";
+      ss << '"';
     }
     ss << val;
     if (isString) {
-      ss << "\"";
+      ss << '"';
     }

     std::string mystr = ss.str();
@@ -934,7 +934,7 @@ int getTensorStartHint(const at::Tensor& t) {
 bool checkFunctionOutputsForLogging(const at::RecordFunction& fn) {
   const auto& outputs = fn.outputs();
   auto num_outputs = fn.num_outputs();
-  VLOG(2) << "outputs: " << num_outputs << " " << outputs.size() << '\n';
+  VLOG(2) << "outputs: " << num_outputs << ' ' << outputs.size() << '\n';
   // We have two cases: for unboxed kernel, we have num_outputs ==
   // outputs.size() for boxed kernel using stack, there could be more elements
   // on the stack from previous ops.
@@ -948,7 +948,7 @@ bool checkFunctionOutputsForLogging(const at::RecordFunction& fn) {
 bool checkFunctionInputsForLogging(const at::RecordFunction& fn) {
   auto num_inputs = fn.num_inputs();
   const auto inputs = fn.inputs();
-  VLOG(2) << "inputs: " << num_inputs << " " << inputs.size() << '\n';
+  VLOG(2) << "inputs: " << num_inputs << ' ' << inputs.size() << '\n';
   // We have two cases: for unboxed kernel, we have num_inputs ==
   // inputs.size() for boxed kernel using stack, there could be more elements
   // on the stack from previous ops.
diff --git a/torch/csrc/tensor/python_tensor.cpp b/torch/csrc/tensor/python_tensor.cpp
index ad418955e05..d4c810d95c6 100644
--- a/torch/csrc/tensor/python_tensor.cpp
+++ b/torch/csrc/tensor/python_tensor.cpp
@@ -218,7 +218,7 @@ static void py_initialize_tensor_type(

 static std::string get_name(Backend backend, ScalarType scalarType) {
   std::ostringstream ss;
-  ss << torch::utils::backend_to_string(backend) << "." << toString(scalarType)
+  ss << torch::utils::backend_to_string(backend) << '.' << toString(scalarType)
      << "Tensor";
   return ss.str();
 }
diff --git a/torch/csrc/utils/python_arg_parser.cpp b/torch/csrc/utils/python_arg_parser.cpp
index 79994eeb862..e89f7887320 100644
--- a/torch/csrc/utils/python_arg_parser.cpp
+++ b/torch/csrc/utils/python_arg_parser.cpp
@@ -663,20 +663,20 @@ auto handle_torch_function_no_python_arg_parser(
     std::stringstream ss;
     ss << "Multiple dispatch failed for '";
     if (module_name && func_name) {
-      ss << module_name << "." << func_name;
+      ss << module_name << '.' << func_name;
     } else {
       py::handle fn = torch_api_function;
-      ss << py::str(fn.attr("__module__")) << "."
+      ss << py::str(fn.attr("__module__")) << '.'
          << py::str(fn.attr("__name__"));
     }
     ss << "'; all " << torch_function_name_str
       << " handlers returned NotImplemented:\n\n";
    if (mode_obj) {
-      ss << " - mode object " << py::repr(mode_obj) << "\n";
+      ss << " - mode object " << py::repr(mode_obj) << '\n';
    }
    for (auto& arg : overloaded_args) {
      ss << " - tensor subclass " << py::repr(get_type_of_overloaded_arg(arg))
-        << "\n";
+        << '\n';
    }
    ss << "\nFor more information, try re-running with TORCH_LOGS=not_implemented";
    const std::string& tmp = ss.str();
@@ -1542,7 +1542,7 @@ std::string FunctionSignature::toString() const {
   // optionals, etc.
   std::ostringstream ss;
   bool keyword_already = false;
-  ss << "(";
+  ss << '(';
   int i = 0;
   for (auto& param : params) {
     if (i != 0) {
@@ -1552,13 +1552,13 @@ std::string FunctionSignature::toString() const {
       ss << "*, ";
       keyword_already = true;
     }
-    ss << param.type_name() << " " << param.name;
+    ss << param.type_name() << ' ' << param.name;
     if (param.optional) {
       ss << " = " << param.default_value;
     }
     i++;
   }
-  ss << ")";
+  ss << ')';
   return ss.str();
 }
diff --git a/torch/csrc/utils/python_dispatch.cpp b/torch/csrc/utils/python_dispatch.cpp
index f97b6ac0ba9..3380bb0a13e 100644
--- a/torch/csrc/utils/python_dispatch.cpp
+++ b/torch/csrc/utils/python_dispatch.cpp
@@ -692,7 +692,7 @@ void initDispatchBindings(PyObject* module) {
         std::stringstream ss;
         ss << op.name;
         if (!op.overload_name.empty()) {
-          ss << "." << op.overload_name;
+          ss << '.' << op.overload_name;
         }
         names.emplace_back(std::move(ss).str());
       }
diff --git a/torch/csrc/utils/structseq.cpp b/torch/csrc/utils/structseq.cpp
index 29d20d5a9bf..2e804aa44ba 100644
--- a/torch/csrc/utils/structseq.cpp
+++ b/torch/csrc/utils/structseq.cpp
@@ -66,7 +66,7 @@ PyObject* returned_structseq_repr(PyStructSequence* obj) {
       ss << ",\n";
     }
   }
-  ss << ")";
+  ss << ')';
   return PyUnicode_FromString(ss.str().c_str());
 }
diff --git a/torch/csrc/utils/tensor_types.cpp b/torch/csrc/utils/tensor_types.cpp
index d696a0cdf4d..c46baea82a4 100644
--- a/torch/csrc/utils/tensor_types.cpp
+++ b/torch/csrc/utils/tensor_types.cpp
@@ -66,14 +66,14 @@ const char* backend_to_string(const at::Backend& backend) {

 std::string options_to_string(const at::TensorOptions& options) {
   std::ostringstream ss;
-  ss << backend_to_string(options.backend()) << "."
+  ss << backend_to_string(options.backend()) << '.'
      << toString(at::typeMetaToScalarType(options.dtype())) << "Tensor";
   return ss.str();
 }

 std::string type_to_string(const at::DeprecatedTypeProperties& type) {
   std::ostringstream ss;
-  ss << backend_to_string(type.backend()) << "." << toString(type.scalarType())
+  ss << backend_to_string(type.backend()) << '.' << toString(type.scalarType())
      << "Tensor";
   return ss.str();
 }
diff --git a/torch/csrc/xpu/Module.cpp b/torch/csrc/xpu/Module.cpp
index b3d1dd929a2..ba5998ba3d3 100644
--- a/torch/csrc/xpu/Module.cpp
+++ b/torch/csrc/xpu/Module.cpp
@@ -367,7 +367,7 @@ static void registerXpuDeviceProperties(PyObject* module) {
                << ", sub_group_sizes=[" << prop.sub_group_sizes
                << "], has_fp16=" << prop.has_fp16
                << ", has_fp64=" << prop.has_fp64
-               << ", has_atomic64=" << prop.has_atomic64 << ")";
+               << ", has_atomic64=" << prop.has_atomic64 << ')';
         return stream.str();
       });
 }
diff --git a/torch/nativert/executor/OpKernel.cpp b/torch/nativert/executor/OpKernel.cpp
index ee4a8503d5c..fa628733804 100644
--- a/torch/nativert/executor/OpKernel.cpp
+++ b/torch/nativert/executor/OpKernel.cpp
@@ -65,7 +65,7 @@ std::string readableArgs(
     } else {
       ss << arg;
     }
-    ss << "\n";
+    ss << '\n';
   }
   return ss.str();
 }
diff --git a/torch/nativert/executor/memory/FunctionSchema.cpp b/torch/nativert/executor/memory/FunctionSchema.cpp
index 264ed702cbc..80347dad296 100644
--- a/torch/nativert/executor/memory/FunctionSchema.cpp
+++ b/torch/nativert/executor/memory/FunctionSchema.cpp
@@ -11,8 +11,8 @@ bool FunctionSchema::alias(size_t input_idx, size_t output_idx) const {
     }
   }

-  VLOG(1) << "checking aliasing spec for " << c10_fn_schema_.name() << " "
-          << (c10_fn_schema_.is_varret() ? "varret" : "non-varret") << " "
+  VLOG(1) << "checking aliasing spec for " << c10_fn_schema_.name() << ' '
+          << (c10_fn_schema_.is_varret() ? "varret" : "non-varret") << ' '
           << (c10_fn_schema_.is_vararg() ? "vararg" : "non-vararg");

   if (!aliasing_spec_.empty()) {
diff --git a/torch/nativert/graph/Graph.cpp b/torch/nativert/graph/Graph.cpp
index 260af58a2a4..47d082f4433 100644
--- a/torch/nativert/graph/Graph.cpp
+++ b/torch/nativert/graph/Graph.cpp
@@ -1031,7 +1031,7 @@ std::ostream& operator<<(std::ostream& out, const Constant& constant) {
   } else if constexpr (is_same_v) {
     out << kLayoutPrefix << arg;
   } else if constexpr (is_same_v) {
-    out << kDevicePrefix << "{" << arg << "}";
+    out << kDevicePrefix << '{' << arg << '}';
   } else if constexpr (is_same_v>) {
     out << fmt::format("[{}]", fmt::join(arg, ","));
   } else if constexpr (is_same_v>) {
@@ -1054,16 +1054,16 @@ void printValue(std::ostream& out, const Value* v) {
 }

 void printNamedArgument(std::ostream& out, const NamedArgument& nv) {
-  out << nv.name << "=" << *nv.value;
+  out << nv.name << '=' << *nv.value;
 }

 void printAttribute(std::ostream& out, const Attribute& nv) {
-  out << nv.name << "=" << nv.value;
+  out << nv.name << '=' << nv.value;
 }
 } // namespace

 std::ostream& operator<<(std::ostream& out, const Value& v) {
-  out << "%" << v.name();
+  out << '%' << v.name();
   // If a list, distinguish it by adding a []
   // Looks like %my_list[]
   if (v.type() == Type::Kind::TensorList) {
@@ -1085,14 +1085,14 @@ std::ostream& operator<<(std::ostream& out, const Node& node) {
     printList(out, false, node.inputs(), [](std::ostream& out, const auto& nv) {
       out << *nv.value;
     });
-    out << ")";
+    out << ')';
     return out;
   }

   printList(out, false, node.outputs_, printValue);
   out << " = ";
-  out << node.target_ << "(";
+  out << node.target_ << '(';
   printList(out, false, node.inputs_, printNamedArgument);
   if (!node.inputs_.empty() && !node.attributes_.empty()) {
     // Emit a connective ',' between inputs and attributes.
@@ -1100,13 +1100,13 @@ std::ostream& operator<<(std::ostream& out, const Node& node) {
   }
   printList(out, false, node.attributes_, printAttribute);
-  out << ")";
+  out << ')';

   return out;
 }

 std::ostream& operator<<(std::ostream& out, const Graph& graph) {
   for (const auto& node : graph.nodes_) {
-    out << node << "\n";
+    out << node << '\n';
   }
   return out;
 }
diff --git a/torch/nativert/graph/GraphSignature.cpp b/torch/nativert/graph/GraphSignature.cpp
index cd07af80719..569fff36a94 100644
--- a/torch/nativert/graph/GraphSignature.cpp
+++ b/torch/nativert/graph/GraphSignature.cpp
@@ -313,7 +313,7 @@ GraphSignature::GraphSignature(const torch::_export::GraphSignature& storage) {
   }

   if (FLAGS_caffe2_log_level > 2) {
-    std::cout << *this << "\n";
+    std::cout << *this << '\n';
   }
 }
@@ -401,14 +401,14 @@ std::ostream& operator<<(std::ostream& out, const GraphSignature& sig) {
   if (!sig.inputsToParameters().empty()) {
     out << "inputsToParameters: {\n";
     for (const auto& [inputName, paramName] : sig.inputsToParameters()) {
-      out << "\t" << inputName << " : " << paramName << "\n";
+      out << '\t' << inputName << " : " << paramName << '\n';
     }
     out << "}\n";
   }
   if (!sig.inputsToBuffers().empty()) {
     out << "inputsToBuffers: {\n";
     for (const auto& [inputName, bufferName] : sig.inputsToBuffers()) {
-      out << "\t" << inputName << " : " << bufferName << "\n";
+      out << '\t' << inputName << " : " << bufferName << '\n';
     }
     out << "}\n";
   }
@@ -416,28 +416,28 @@ std::ostream& operator<<(std::ostream& out, const GraphSignature& sig) {
     out << "inputsToTensorConstants: {\n";
     for (const auto& [inputName, tensorConstantName] :
          sig.inputsToTensorConstants()) {
-      out << "\t" << inputName << " : " << tensorConstantName << "\n";
+      out << '\t' << inputName << " : " << tensorConstantName << '\n';
     }
     out << "}\n";
   }
   if (!sig.inputsToCustomObjs().empty()) {
     out << "inputsToCustomObjs: {\n";
     for (const auto& [inputName, customObjName] : sig.inputsToCustomObjs()) {
-      out << "\t" << inputName << " : " << customObjName << "\n";
+      out << '\t' << inputName << " : " << customObjName << '\n';
     }
     out << "}\n";
   }
   if (!sig.userOutputs().empty()) {
     out << "userOutputs: {\n";
     for (const auto& outputName : sig.userOutputs()) {
-      out << "\t" << outputName.value_or("Constant") << "\n";
+      out << '\t' << outputName.value_or("Constant") << '\n';
     }
     out << "}\n";
   }
   if (!sig.buffersToMutate().empty()) {
     out << "buffersToMutate: {\n";
     for (const auto& [outputName, mutatedBufferName] : sig.buffersToMutate()) {
-      out << "\t" << outputName << " : " << mutatedBufferName << "\n";
+      out << '\t' << outputName << " : " << mutatedBufferName << '\n';
     }
     out << "}\n";
   }
@@ -445,7 +445,7 @@ std::ostream& operator<<(std::ostream& out, const GraphSignature& sig) {
     out << "userInputsToMutate: {\n";
     for (const auto& [outputName, mutatedUserInputName] :
          sig.userInputsToMutate()) {
-      out << "\t" << outputName << " : " << mutatedUserInputName << "\n";
+      out << '\t' << outputName << " : " << mutatedUserInputName << '\n';
     }
     out << "}\n";
   }
@@ -453,7 +453,7 @@ std::ostream& operator<<(std::ostream& out, const GraphSignature& sig) {
   if (!sig.gradientsToParameters().empty()) {
     out << "gradientsToParameters: {\n";
     for (const auto& [outputName, paramName] : sig.gradientsToParameters()) {
-      out << "\t" << outputName << " : " << paramName << "\n";
+      out << '\t' << outputName << " : " << paramName << '\n';
     }
     out << "}\n";
   }
@@ -461,11 +461,11 @@ std::ostream& operator<<(std::ostream& out, const GraphSignature& sig) {
     out << "gradientsToUserInputs: {\n";
     for (const auto& [outputName, userInputName] :
          sig.gradientsToUserInputs()) {
-      out << "\t" << outputName << " : " << userInputName << "\n";
+      out << '\t' << outputName << " : " << userInputName << '\n';
     }
     out << "}\n";
   }
-  out << "lossOutput: " << sig.lossOutput() << "\n";
+  out << "lossOutput: " << sig.lossOutput() << '\n';
   }
   return out;
 }
diff --git a/torch/nativert/graph/passes/pass_manager/PassManager.cpp b/torch/nativert/graph/passes/pass_manager/PassManager.cpp
index e023f223ed6..4dbb0012877 100644
--- a/torch/nativert/graph/passes/pass_manager/PassManager.cpp
+++ b/torch/nativert/graph/passes/pass_manager/PassManager.cpp
@@ -35,7 +35,7 @@ bool GraphPassManager::run_pass(Graph* graph, const GraphPassIdentifier& name) {

 bool GraphPassManager::pass_pre_run_hook(Graph* graph, const GraphPass& pass) {
   if (opts_.logGraphBetweenPasses()) {
-    LOG(INFO) << "Before pass: " << pass.name() << "\n"
+    LOG(INFO) << "Before pass: " << pass.name() << '\n'
               << graph->toString() << "-------------------------";
   }
   return false;
@@ -43,7 +43,7 @@ bool GraphPassManager::pass_pre_run_hook(Graph* graph, const GraphPass& pass) {

 bool GraphPassManager::pass_post_run_hook(Graph* graph, const GraphPass& pass) {
   if (opts_.logGraphBetweenPasses()) {
-    LOG(INFO) << "After pass: " << pass.name() << "\n"
+    LOG(INFO) << "After pass: " << pass.name() << '\n'
               << graph->toString() << "-------------------------";
   }
   return false;