diff --git a/.lintrunner.toml b/.lintrunner.toml
index cd5d338b636..8b577c47e0d 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1113,7 +1113,6 @@ exclude_patterns = [
     # These files are all grandfathered in, feel free to remove from this list
     # as necessary
     # NOTE: remove the patterns in the order they are listed
-    'aten/src/ATen/[a-mA-M]*/**',
     'test/**',
 ]
 init_command = [
diff --git a/aten/src/ATen/core/DimVector.h b/aten/src/ATen/core/DimVector.h
index 576b9e142eb..aadb3fa867f 100644
--- a/aten/src/ATen/core/DimVector.h
+++ b/aten/src/ATen/core/DimVector.h
@@ -3,7 +3,7 @@
 
 namespace at {
 
-// Re-declaring 'DimVector' type and size inside 'at' namespace.
+// Redeclaring 'DimVector' type and size inside 'at' namespace.
 // This is done to avoid modifying every use into their 'c10'
 // equivalent.
 
diff --git a/aten/src/ATen/core/GeneratorForPrivateuseone.cpp b/aten/src/ATen/core/GeneratorForPrivateuseone.cpp
index 030e9f70851..7dca153436d 100644
--- a/aten/src/ATen/core/GeneratorForPrivateuseone.cpp
+++ b/aten/src/ATen/core/GeneratorForPrivateuseone.cpp
@@ -16,7 +16,7 @@ _GeneratorRegister::_GeneratorRegister(const GeneratorFuncType& func) {
   TORCH_WARN_DEPRECATION(
       "REGISTER_GENERATOR_PRIVATEUSE1 is deprecated. \
-    Please derive PrivateUse1HooksInterface to implememt getNewGenerator instead.")
+    Please derive PrivateUse1HooksInterface to implement getNewGenerator instead.")
 
   TORCH_CHECK(
       !GetGeneratorPrivate().has_value(),
diff --git a/aten/src/ATen/core/IListRef.h b/aten/src/ATen/core/IListRef.h
index a11a78c03a3..8ea6249f2b6 100644
--- a/aten/src/ATen/core/IListRef.h
+++ b/aten/src/ATen/core/IListRef.h
@@ -149,7 +149,7 @@
  * First, keep in mind that we assume that boxed containers will
  * have to deal with `IValue` (e.g. `c10::List`). In this context,
  * what may be happening is that `IValue` doesn't store internally
- * your type `T`. Instead, it constructs a type new `T` everytime
+ * your type `T`. Instead, it constructs a new `T` every time
  * you try to get `T` for it (see `IListRef`).
  */
 
@@ -186,7 +186,7 @@ class IListRef;
  * This macro is useful because it allows us to handle different
  * types (that correspond to different tags) to be implemented
  * only once. We can do it even when the implementation of the
- * different tags aren't syntatically the same, by dispatching
+ * different tags aren't syntactically the same, by dispatching
  * it to a function (e.g. `ImplT::(this_)`).
  */
 #define TORCH_ILISTREF_UNWRAP(TAG, BODY) \
diff --git a/aten/src/ATen/core/IListRef_inl.h b/aten/src/ATen/core/IListRef_inl.h
index df320c13d9c..425a80a710f 100644
--- a/aten/src/ATen/core/IListRef_inl.h
+++ b/aten/src/ATen/core/IListRef_inl.h
@@ -42,7 +42,7 @@ class IListRefTagImplBase {
   /*
    * We have these function (besides the `unwrap`s above) because the
    * implementation for both `IListRef::operator[]` and `IListRefIterator::operator*`
-   * weren't syntatically equal for the existing tags at the time
+   * weren't syntactically equal for the existing tags at the time
    * (`Unboxed` and `Boxed`).
    */
   static IListRefConstRef<T> front(const list_type& lst) {
diff --git a/aten/src/ATen/core/Variadic.h b/aten/src/ATen/core/Variadic.h
index da4df1b1b1a..f594deb5665 100644
--- a/aten/src/ATen/core/Variadic.h
+++ b/aten/src/ATen/core/Variadic.h
@@ -12,7 +12,7 @@ namespace at {
 // in order. This is most commonly used in autogenerated code,
 // where it is convenient to have a function that can uniformly
 // take arguments of different types. If your arguments
-// are homogenous consider using a std::initializer_list instead.
+// are homogeneous consider using a std::initializer_list instead.
 //
 // For examples of this in use, see torch/csrc/utils/variadic.h
 template <typename F>
diff --git a/aten/src/ATen/core/dispatch/Dispatcher.cpp b/aten/src/ATen/core/dispatch/Dispatcher.cpp
index 5facca30a54..1291b4d3c32 100644
--- a/aten/src/ATen/core/dispatch/Dispatcher.cpp
+++ b/aten/src/ATen/core/dispatch/Dispatcher.cpp
@@ -111,7 +111,7 @@ void Dispatcher::waitForDef(const FunctionSchema& schema) {
   TORCH_INTERNAL_ASSERT(r,
       "Expected main interpreter to define ", schema.operator_name(),
       ", but this didn't happen within timeout. Are you trying to load "
-      "different models in the same torchdeploy/multipy instance? You "
+      "different models in the same torchdeploy/multipy instance? You " // codespell:ignore
       "must warmup each interpreter identically, e.g., import all "
       "the same dependencies.");
 }
@@ -129,7 +129,7 @@ void Dispatcher::waitForImpl(const OperatorName& op_name, std::optional<c10::DispatchKey> maybe_dk)
   TORCH_CHECK(idx >= 0 && static_cast<uint64_t>(idx) < backendFallbackKernels_.size(), "idx=", idx);
-  // NB: Perserve BC for registering fallback for AutogradPrivateUse1 multiple time,
-  // refer to https://github.com/pytorch/pytorch/issues/163979 for more informations.
+  // NB: Preserve BC for registering fallback for AutogradPrivateUse1 multiple times,
+  // refer to https://github.com/pytorch/pytorch/issues/163979 for more information.
   TORCH_CHECK(
       dispatchKey == DispatchKey::AutogradPrivateUse1 ||
           !backendFallbackKernels_[idx].kernel.isValid(),
diff --git a/aten/src/ATen/core/dispatch/Dispatcher.h b/aten/src/ATen/core/dispatch/Dispatcher.h
index 880de786b70..6b63bd48009 100644
--- a/aten/src/ATen/core/dispatch/Dispatcher.h
+++ b/aten/src/ATen/core/dispatch/Dispatcher.h
@@ -222,7 +222,8 @@ class TORCH_API Dispatcher final {
     return backendFallbackKernels_[dispatch_ix].kernel.isValid();
   }
 
-  // Used by torchdeploy/multipy for multiple interpreters racing.
+  // Used by torchdeploy/multipy for multiple // codespell:ignore multipy
+  // interpreters racing.
   void waitForDef(const FunctionSchema& schema);
   void waitForImpl(
       const OperatorName& op_name,
@@ -414,7 +415,7 @@ class TORCH_API Dispatcher final {
   std::unique_ptr<detail::RegistrationListenerList> listeners_;
 
   // This condition variable gets notified whenever we add a new def/impl to the
-  // dispatch table. This is primarily used by multipy/torchdeploy, when
+  // dispatch table. This is primarily used by multipy/torchdeploy, when // codespell:ignore multipy
   // we have multiple interpreters trying to register to the dispatch table.
   // In this situation, whenever the non-primary interpreter would have tried
   // to register to the dispatch table, instead it will check to see if the
diff --git a/aten/src/ATen/core/ivalue_inl.h b/aten/src/ATen/core/ivalue_inl.h
index ac7540cffd1..f384a3ea46f 100644
--- a/aten/src/ATen/core/ivalue_inl.h
+++ b/aten/src/ATen/core/ivalue_inl.h
@@ -992,7 +992,7 @@ struct C10_EXPORT ivalue::Future final : c10::intrusive_ptr_target {
     std::unique_lock<std::mutex> lock(mutex_);
     if (completed_) {
       // This should be rare and shouldn't cause log spew. Its important to
-      // log errors and thats why we have this log here.
+      // log errors and that's why we have this log here.
       std::string msg = c10::str(
           "Skipping setting following error on the Future since "
           "it is already marked completed (this is not necessarily "
diff --git a/aten/src/ATen/core/jit_type.h b/aten/src/ATen/core/jit_type.h
index 535831ea11d..5378bd0b3d1 100644
--- a/aten/src/ATen/core/jit_type.h
+++ b/aten/src/ATen/core/jit_type.h
@@ -887,7 +887,7 @@ struct TORCH_API ListType
   // this function will return the global singleton type pointer
   // the type List<T>.
   // The extra "identifier" argument is needed because we have multiple container types
-  // that all re-use this function (List, array, etc.)
+  // that all reuse this function (List, array, etc.)
   static TypePtr get(const std::string& identifier, TypePtr inner);
 
   // common cast List[Tensor]
@@ -985,7 +985,7 @@ struct TORCH_API DictType : public SharedType {
   // this function will return the global singleton type pointer
   // the type List<T>.
   // The extra "identifier" argument is needed because we have multiple container types
-  // that all re-use this function (Dict and unordered_map)
+  // that all reuse this function (Dict and unordered_map)
   static TypePtr get(const std::string& identifier, TypePtr key, TypePtr val);
 
  private:
diff --git a/aten/src/ATen/cpu/vec/vec512/vec512_float8.h b/aten/src/ATen/cpu/vec/vec512/vec512_float8.h
index 12ee4c46064..0a54986d82b 100644
--- a/aten/src/ATen/cpu/vec/vec512/vec512_float8.h
+++ b/aten/src/ATen/cpu/vec/vec512/vec512_float8.h
@@ -498,8 +498,8 @@ static inline Vectorized<T> binary_fp8_op_as_fp32(
 
 // Refer to
 // https://github.com/pytorch/pytorch/pull/153364#discussion_r2086509353 FP8 +,
-// -, *, /, planed to be deleted in the future and here is just to make compiler
-// happy
+// -, *, /, planned to be deleted in the future and here is just to make
+// compiler happy
 Vectorized<Float8_e4m3fn> inline operator+(
     const Vectorized<Float8_e4m3fn>& a,
     const Vectorized<Float8_e4m3fn>& b) {
@@ -585,8 +585,8 @@ class Vectorized<Float8_e5m2> : public Vectorizedf8<Float8_e5m2> {
 
 // Refer to
 // https://github.com/pytorch/pytorch/pull/153364#discussion_r2086509353 FP8 +,
-// -, *, /, planed to be deleted in the future and here is just to make compiler
-// happy
+// -, *, /, planned to be deleted in the future and here is just to make
+// compiler happy
 Vectorized<Float8_e5m2> inline operator+(
     const Vectorized<Float8_e5m2>& a,
     const Vectorized<Float8_e5m2>& b) {
diff --git a/aten/src/ATen/cuda/CUDAGreenContext.cpp b/aten/src/ATen/cuda/CUDAGreenContext.cpp
index 8aa05b80f82..a579e45e160 100644
--- a/aten/src/ATen/cuda/CUDAGreenContext.cpp
+++ b/aten/src/ATen/cuda/CUDAGreenContext.cpp
@@ -7,7 +7,7 @@
 #define HAS_CUDA_GREEN_CONTEXT() 1
 #else
 #define HAS_CUDA_GREEN_CONTEXT() 0
-// Suppress unsued private field warnings as this class is not supposed to be called
+// Suppress unused private field warnings as this class is not supposed to be called
 C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wunused-private-field")
 #endif
 
diff --git a/aten/src/ATen/cuda/CUDASparseDescriptors.cpp b/aten/src/ATen/cuda/CUDASparseDescriptors.cpp
index d5f04df55f9..c7ab4fbfc95 100644
--- a/aten/src/ATen/cuda/CUDASparseDescriptors.cpp
+++ b/aten/src/ATen/cuda/CUDASparseDescriptors.cpp
@@ -179,7 +179,7 @@ CuSparseSpMatCsrDescriptor::CuSparseSpMatCsrDescriptor(const Tensor& input, int6
       batch_offset * values_batch_stride * values.itemsize(),
       index_type, // data type of row offsets index
       index_type, // data type of col indices
-      CUSPARSE_INDEX_BASE_ZERO, // base index of row offset and col indes
+      CUSPARSE_INDEX_BASE_ZERO, // base index of row offset and col index
       value_type // data type of values
       ));
 
diff --git a/aten/src/ATen/cuda/CachingHostAllocator.h b/aten/src/ATen/cuda/CachingHostAllocator.h
index b9486314b1c..53b0cdced4c 100644
--- a/aten/src/ATen/cuda/CachingHostAllocator.h
+++ b/aten/src/ATen/cuda/CachingHostAllocator.h
@@ -10,7 +10,7 @@ namespace at::cuda {
 //
 // A caching allocator for CUDA host allocations (pinned memory).
 //
-// This provides a drop-in replacement for THCudaHostAllocator, which re-uses
+// This provides a drop-in replacement for THCudaHostAllocator, which reuses
 // freed pinned (page-locked) memory allocations. This avoids device
 // synchronizations due to cudaFreeHost calls.
 //
@@ -26,7 +26,7 @@ inline TORCH_CUDA_CPP_API at::HostAllocator* getCachingHostAllocator() {
 }
 
 // Records an event in the specified stream. The allocation corresponding to the
-// input `ptr`/`ctx` will not be re-used until the event has occurred.
+// input `ptr`/`ctx` will not be reused until the event has occurred.
 C10_DEPRECATED_MESSAGE(
     "at::cuda::CachingHostAllocator_recordEvent(...) is deprecated. Please use at::getHostAllocator(at::kCUDA)->record_event(...) instead.")
 inline TORCH_CUDA_CPP_API bool CachingHostAllocator_recordEvent(
diff --git a/aten/src/ATen/cuda/detail/TensorInfo.cuh b/aten/src/ATen/cuda/detail/TensorInfo.cuh
index a320000ae88..9f3f7d31add 100644
--- a/aten/src/ATen/cuda/detail/TensorInfo.cuh
+++ b/aten/src/ATen/cuda/detail/TensorInfo.cuh
@@ -93,7 +93,7 @@ struct IndexToOffset {
   }
 };
 
-// Uses dynamic (runtime) instead of static (compiletime) dims
+// Uses dynamic (runtime) instead of static (compile time) dims
 template <typename T, typename IndexType>
 struct IndexToOffset<T, IndexType, -1> {
   static inline __host__ __device__ IndexType get(
diff --git a/aten/src/ATen/cuda/jiterator.cu b/aten/src/ATen/cuda/jiterator.cu
index d664c828bda..0545c8354ed 100644
--- a/aten/src/ATen/cuda/jiterator.cu
+++ b/aten/src/ATen/cuda/jiterator.cu
@@ -32,7 +32,7 @@ static inline void launch_jitted_vectorized_kernel_dynamic(
 
   // Different kernels are compiled depending on what we're vectorizing up to (1, 2 or 4 elements)
   // fn_ptr is set to the appropriate function based on the vec size and GPU used
-  // TODO: Memory use can probably be optimized by re-using kernels across GPUs with
+  // TODO: Memory use can probably be optimized by reusing kernels across GPUs with
   // the same compute capability
   std::string f_inputs_type_str = at::cuda::jit::typeName(common_dtype);
 
diff --git a/aten/src/ATen/functorch/LegacyVmapTransforms.h b/aten/src/ATen/functorch/LegacyVmapTransforms.h
index 390989d45bf..bf21951f222 100644
--- a/aten/src/ATen/functorch/LegacyVmapTransforms.h
+++ b/aten/src/ATen/functorch/LegacyVmapTransforms.h
@@ -143,7 +143,7 @@ struct TORCH_API VmapPhysicalView {
   // mapping a physical tensor to a new logical tensor (BatchedTensor)
   VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const;
 
-  // Maps a logical shape to a physical shape by pre-pending the batch
+  // Maps a logical shape to a physical shape by prepending the batch
   // sizes to the logical shape.
   VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const;
   SymDimVector getPhysicalShape(c10::SymIntArrayRef logical_shape) const;
diff --git a/aten/src/ATen/functorch/TensorWrapper.h b/aten/src/ATen/functorch/TensorWrapper.h
index bf7b14fd416..281682fa8bc 100644
--- a/aten/src/ATen/functorch/TensorWrapper.h
+++ b/aten/src/ATen/functorch/TensorWrapper.h
@@ -27,7 +27,7 @@ namespace at::functorch {
 //
 // There are alternative designs we could have chosen (e.g. each grad transform
 // stores a weak map of Tensor -> AutogradMeta); the benefit of the TensorWrapper
-// design is that we can re-use existing VariableType kernels (i.e. Autograd kernels)
+// design is that we can reuse existing VariableType kernels (i.e. Autograd kernels)
 // without much modification. Since a TensorWrapper looks like a regular Tensor,
 // the VariableType kernel can pull out the AutogradMeta struct from where it
 // expects and extend the autograd graph