diff --git a/aten/src/ATen/CPUApplyUtils.h b/aten/src/ATen/CPUApplyUtils.h
index c8a735c1775..780510579a7 100644
--- a/aten/src/ATen/CPUApplyUtils.h
+++ b/aten/src/ATen/CPUApplyUtils.h
@@ -64,8 +64,12 @@ struct strided_tensor_iter_fixed {
   int64_t strides_[N] = {0};
 
   strided_tensor_iter_fixed(strided_tensor_iter_fixed const&) = delete;
-  void operator=(strided_tensor_iter_fixed const& x) = delete;
-  strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) = default;
+  strided_tensor_iter_fixed& operator=(strided_tensor_iter_fixed const& x) =
+      delete;
+  strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) noexcept = default;
+  strided_tensor_iter_fixed& operator=(strided_tensor_iter_fixed&& x) noexcept =
+      default;
+  ~strided_tensor_iter_fixed() noexcept = default;
   strided_tensor_iter_fixed(
       Tensor& tensor,
       [[maybe_unused]] bool sort_strides = false)
@@ -93,8 +97,10 @@ struct strided_tensor_iter {
   std::vector<int64_t> strides_;
 
   strided_tensor_iter(strided_tensor_iter const&) = delete;
-  void operator=(strided_tensor_iter const& x) = delete;
-  strided_tensor_iter(strided_tensor_iter&&) = default;
+  strided_tensor_iter& operator=(strided_tensor_iter const& x) = delete;
+  strided_tensor_iter(strided_tensor_iter&&) noexcept = default;
+  strided_tensor_iter& operator=(strided_tensor_iter&&) noexcept = default;
+  ~strided_tensor_iter() noexcept = default;
   strided_tensor_iter(Tensor& tensor)
       : data_(tensor.data_ptr<T>()),
         dim_(tensor.ndimension()),
diff --git a/c10/util/ThreadLocal.h b/c10/util/ThreadLocal.h
index 850bb5d4c42..c6f3d6d874b 100644
--- a/c10/util/ThreadLocal.h
+++ b/c10/util/ThreadLocal.h
@@ -115,7 +115,10 @@ class ThreadLocal {
   explicit ThreadLocal(Accessor accessor) : accessor_(accessor) {}
 
   ThreadLocal(const ThreadLocal&) = delete;
+  ThreadLocal(ThreadLocal&&) noexcept = default;
   ThreadLocal& operator=(const ThreadLocal&) = delete;
+  ThreadLocal& operator=(ThreadLocal&&) noexcept = default;
+  ~ThreadLocal() = default;
 
   Type& get() {
     return *accessor_();
diff --git a/c10/util/ThreadLocalDebugInfo.h b/c10/util/ThreadLocalDebugInfo.h
index bea8c5f27ac..3d26dd44f6a 100644
--- a/c10/util/ThreadLocalDebugInfo.h
+++ b/c10/util/ThreadLocalDebugInfo.h
@@ -74,6 +74,8 @@ class C10_API DebugInfoGuard {
 
   DebugInfoGuard(const DebugInfoGuard&) = delete;
   DebugInfoGuard(DebugInfoGuard&&) = delete;
+  DebugInfoGuard& operator=(const DebugInfoGuard&) = delete;
+  DebugInfoGuard& operator=(DebugInfoGuard&&) = delete;
 
  private:
   bool active_ = false;
diff --git a/torch/csrc/autograd/graph_task.h b/torch/csrc/autograd/graph_task.h
index e4a7ae4dad1..018beaffdaa 100644
--- a/torch/csrc/autograd/graph_task.h
+++ b/torch/csrc/autograd/graph_task.h
@@ -48,6 +48,9 @@ struct GraphTask : std::enable_shared_from_this<GraphTask> {
   struct Capture {
     Capture(const Capture&) = delete;
     Capture(Capture&&) = default;
+    Capture& operator=(const Capture&) = delete;
+    Capture& operator=(Capture&&) = default;
+    ~Capture() = default;
 
     Capture(int input_idx, int output_idx)
         : input_idx_(input_idx), output_idx_(output_idx) {}
diff --git a/torch/csrc/dynamo/python_compiled_autograd.cpp b/torch/csrc/dynamo/python_compiled_autograd.cpp
index 7a5969fffba..024603270f7 100644
--- a/torch/csrc/dynamo/python_compiled_autograd.cpp
+++ b/torch/csrc/dynamo/python_compiled_autograd.cpp
@@ -777,6 +777,7 @@ CacheNode* _compiled_autograd_impl(
   return cache;
 }
 
+// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions)
 struct LockGuardWithErrorLogs {
   LockGuardWithErrorLogs(std::mutex& mtx) : mtx_(mtx) {
     // Note: the standard allows try_lock to fail spuriously during races for
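Every hunk in this patch is the same category of fix, flagged by clang-tidy's cppcoreguidelines-special-member-functions check: a class that declares any of the five special member functions should declare all of them, deleted copy assignment should return the class type rather than void, and defaulted moves should be noexcept. A minimal sketch of the resulting shape for a movable, non-copyable type; the type name is illustrative, not from the codebase:

#include <cstdint>
#include <vector>

struct MoveOnlyIter {
  std::vector<int64_t> strides;

  MoveOnlyIter() = default;
  // Copy operations are deleted; note the deleted assignment still returns
  // MoveOnlyIter&, not void, matching the fix in CPUApplyUtils.h above.
  MoveOnlyIter(const MoveOnlyIter&) = delete;
  MoveOnlyIter& operator=(const MoveOnlyIter&) = delete;
  // Move operations are defaulted and marked noexcept.
  MoveOnlyIter(MoveOnlyIter&&) noexcept = default;
  MoveOnlyIter& operator=(MoveOnlyIter&&) noexcept = default;
  ~MoveOnlyIter() = default;
};

The noexcept on the moves is not cosmetic: std::vector reallocation moves elements through std::move_if_noexcept, so a copyable type whose move constructor may throw gets copied instead of moved. The NOLINTNEXTLINE above LockGuardWithErrorLogs is the escape hatch for the one type where spelling out all five members was not wanted.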
diff --git a/torch/csrc/profiler/containers.h b/torch/csrc/profiler/containers.h
index 6ff73917d91..060c6e3b534 100644
--- a/torch/csrc/profiler/containers.h
+++ b/torch/csrc/profiler/containers.h
@@ -5,7 +5,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -52,7 +51,10 @@ class AppendOnlyList {
   AppendOnlyList() : buffer_last_{buffer_.before_begin()} {}
 
   AppendOnlyList(const AppendOnlyList&) = delete;
+  AppendOnlyList(AppendOnlyList&&) = delete;
   AppendOnlyList& operator=(const AppendOnlyList&) = delete;
+  AppendOnlyList& operator=(AppendOnlyList&&) = delete;
+  ~AppendOnlyList() = default;
 
   size_t size() const {
     return n_blocks_ * ChunkSize - (size_t)(end_ - next_);
diff --git a/torch/csrc/profiler/kineto_shim.cpp b/torch/csrc/profiler/kineto_shim.cpp
index c1c8feea13c..ef70242eafb 100644
--- a/torch/csrc/profiler/kineto_shim.cpp
+++ b/torch/csrc/profiler/kineto_shim.cpp
@@ -96,8 +96,6 @@ TraceWrapper::TraceWrapper(const int64_t start_time, const std::string& name)
 }
 #endif // USE_KINETO
 
-TraceWrapper::~TraceWrapper() = default;
-
 activity_t* TraceWrapper::addCPUActivity(
     const std::string& name,
     const libkineto::ActivityType type,
diff --git a/torch/csrc/profiler/kineto_shim.h b/torch/csrc/profiler/kineto_shim.h
index 44509e4a5e6..085e9dd2fcb 100644
--- a/torch/csrc/profiler/kineto_shim.h
+++ b/torch/csrc/profiler/kineto_shim.h
@@ -67,9 +67,6 @@ void addMetadata(
 // Wraps: libkineto::CpuTraceBuffer
 struct TraceWrapper {
   TraceWrapper(const int64_t start_time, const std::string& name);
-  TraceWrapper(TraceWrapper&&) = default;
-  TraceWrapper(const TraceWrapper&) = delete;
-  ~TraceWrapper();
 
   // The caller is expected to hold a mutex when calling `addCPUActivity`.
   activity_t* addCPUActivity(
@@ -96,8 +93,6 @@
 struct ActivityTraceWrapper {
   explicit ActivityTraceWrapper(std::unique_ptr<interface_trace_t>&& trace);
   ActivityTraceWrapper() = default;
-  ActivityTraceWrapper(ActivityTraceWrapper&&) = default;
-  ActivityTraceWrapper(const ActivityTraceWrapper&) = delete;
   explicit operator bool() const;
   void save(const std::string& path);
 
diff --git a/torch/csrc/profiler/stubs/base.cpp b/torch/csrc/profiler/stubs/base.cpp
index a5a5dead6fa..6ee455ca7e9 100644
--- a/torch/csrc/profiler/stubs/base.cpp
+++ b/torch/csrc/profiler/stubs/base.cpp
@@ -1,28 +1,31 @@
-#include
-
+#include
 #include
+#include
+#include
+#include
 
 namespace torch::profiler::impl {
 
-ProfilerStubs::~ProfilerStubs() = default;
-
 namespace {
 struct DefaultStubs : public ProfilerStubs {
-  DefaultStubs(const char* name) : name_{name} {}
+  explicit DefaultStubs(const char* name) : name_{name} {}
 
-  void record(c10::DeviceIndex*, ProfilerVoidEventStub*, int64_t*)
-      const override {
+  void record(
+      c10::DeviceIndex* /*device*/,
+      ProfilerVoidEventStub* /*event*/,
+      int64_t* /*cpu_ns*/) const override {
     fail();
   }
-  float elapsed(const ProfilerVoidEventStub*, const ProfilerVoidEventStub*)
-      const override {
+  float elapsed(
+      const ProfilerVoidEventStub* /*event*/,
+      const ProfilerVoidEventStub* /*event2*/) const override {
     fail();
-    return 0.f;
+    return 0.F;
   }
-  void mark(const char*) const override {
+  void mark(const char* /*name*/) const override {
     fail();
  }
-  void rangePush(const char*) const override {
+  void rangePush(const char* /*name*/) const override {
     fail();
   }
   void rangePop() const override {
@@ -31,7 +34,7 @@ struct DefaultStubs : public ProfilerStubs {
   bool enabled() const override {
     return false;
   }
-  void onEachDevice(std::function) const override {
+  void onEachDevice(std::function<void(int)> /*op*/) const override {
     fail();
   }
   void synchronize() const override {
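The kineto_shim change goes the opposite way: rather than completing the set, TraceWrapper and ActivityTraceWrapper drop every user-declared special member (including the out-of-line ~TraceWrapper()) so the compiler generates all of them, i.e. the rule of zero. A sketch of why the implicit members suffice, using a hypothetical wrapper type:

#include <memory>
#include <string>

struct Wrapper {
  std::unique_ptr<std::string> payload;  // move-only member

  // No user-declared special members. Copying is implicitly suppressed
  // (unique_ptr is non-copyable), the implicit moves transfer ownership,
  // and the implicit destructor releases the payload.
};

This keeps the header/source pair smaller than defaulting everything by hand, at the cost of no longer documenting copyability explicitly in the class body.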
diff --git a/torch/csrc/profiler/stubs/base.h b/torch/csrc/profiler/stubs/base.h
index c8a0e6cd2eb..c64f4e5a6c9 100644
--- a/torch/csrc/profiler/stubs/base.h
+++ b/torch/csrc/profiler/stubs/base.h
@@ -33,7 +33,7 @@ struct TORCH_API ProfilerStubs {
   }
   virtual void onEachDevice(std::function<void(int)> op) const = 0;
   virtual void synchronize() const = 0;
-  virtual ~ProfilerStubs();
+  virtual ~ProfilerStubs() = default;
 };
 
 TORCH_API void registerCUDAMethods(ProfilerStubs* stubs);
diff --git a/torch/csrc/profiler/unwind/communicate.h b/torch/csrc/profiler/unwind/communicate.h
index 6ace27c543d..bdaca33b6db 100644
--- a/torch/csrc/profiler/unwind/communicate.h
+++ b/torch/csrc/profiler/unwind/communicate.h
@@ -41,6 +41,10 @@ struct Communicate {
       err_ = std::make_unique<std::istream>(errbuf_.get());
     }
   }
+  Communicate(const Communicate&) = delete;
+  Communicate(Communicate&&) = delete;
+  Communicate& operator=(const Communicate&) = delete;
+  Communicate& operator=(Communicate&&) = delete;
   ~Communicate() {
     close(inpipe_[1]);
     close(outpipe_[0]);
diff --git a/torch/csrc/profiler/unwind/mem_file.h b/torch/csrc/profiler/unwind/mem_file.h
index b5b6807a7bb..2580e6f6da5 100644
--- a/torch/csrc/profiler/unwind/mem_file.h
+++ b/torch/csrc/profiler/unwind/mem_file.h
@@ -81,7 +81,9 @@ struct MemFile {
   }
 
   MemFile(const MemFile&) = delete;
+  MemFile(MemFile&&) = delete;
   MemFile& operator=(const MemFile&) = delete;
+  MemFile& operator=(MemFile&&) = delete;
   [[nodiscard]] const char* data() const {
     return (const char*)mem_;
   }
diff --git a/torch/csrc/profiler/unwind/unwind.cpp b/torch/csrc/profiler/unwind/unwind.cpp
index db903ca1af7..bed30724582 100644
--- a/torch/csrc/profiler/unwind/unwind.cpp
+++ b/torch/csrc/profiler/unwind/unwind.cpp
@@ -2,7 +2,6 @@
 #include
 #include
 #include
-#include
 
 #if !defined(__linux__) || !defined(__x86_64__) || !defined(__has_include) || \
     !__has_include("ext/stdio_filebuf.h")
@@ -66,6 +65,10 @@ struct UpgradeExclusive {
     rdlock_.unlock();
     rdlock_.mutex()->lock();
   }
+  UpgradeExclusive(const UpgradeExclusive&) = delete;
+  UpgradeExclusive(UpgradeExclusive&&) = delete;
+  UpgradeExclusive& operator=(const UpgradeExclusive&) = delete;
+  UpgradeExclusive& operator=(UpgradeExclusive&&) = delete;
   ~UpgradeExclusive() {
     rdlock_.mutex()->unlock();
     rdlock_.lock();
diff --git a/torch/csrc/utils/invalid_arguments.cpp b/torch/csrc/utils/invalid_arguments.cpp
index 4d698701459..d26f8c2ee1d 100644
--- a/torch/csrc/utils/invalid_arguments.cpp
+++ b/torch/csrc/utils/invalid_arguments.cpp
@@ -116,6 +116,7 @@ struct Option {
   Option(Option&& other) noexcept = default;
   Option& operator=(const Option&) = delete;
   Option& operator=(Option&&) = delete;
+  ~Option() = default;
 
   std::vector<Argument> arguments;
   bool is_variadic;
diff --git a/torch/csrc/utils/python_dispatch.cpp b/torch/csrc/utils/python_dispatch.cpp
index 1dad860f6d9..c5a659f371d 100644
--- a/torch/csrc/utils/python_dispatch.cpp
+++ b/torch/csrc/utils/python_dispatch.cpp
@@ -97,6 +97,10 @@ struct EnableHermeticPyObject {
     c10::impl::tls_set_dispatch_key_included(
         at::DispatchKey::PythonTLSSnapshot, old_python_snapshot_);
   }
+  EnableHermeticPyObject(const EnableHermeticPyObject&) = delete;
+  EnableHermeticPyObject(EnableHermeticPyObject&&) = delete;
+  EnableHermeticPyObject& operator=(const EnableHermeticPyObject&) = delete;
+  EnableHermeticPyObject& operator=(EnableHermeticPyObject&&) = delete;
   bool old_;
   bool old_excluded_python_;
   bool old_python_;
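communicate.h, mem_file.h, unwind.cpp, and python_dispatch.cpp all follow a third pattern: RAII types whose destructors close file descriptors, unmap memory, or restore thread-local dispatch state get both copy and move deleted, because a second instance would run the cleanup twice. The shape, reduced to a minimal illustrative guard (std::lock_guard is the standard-library analogue):

#include <mutex>

struct ScopedLock {
  explicit ScopedLock(std::mutex& m) : m_(m) {
    m_.lock();  // acquire in the constructor
  }
  // Non-copyable and non-movable: a copied or moved-from instance would
  // unlock the same mutex a second time in its destructor.
  ScopedLock(const ScopedLock&) = delete;
  ScopedLock(ScopedLock&&) = delete;
  ScopedLock& operator=(const ScopedLock&) = delete;
  ScopedLock& operator=(ScopedLock&&) = delete;
  ~ScopedLock() {
    m_.unlock();  // release on scope exit
  }

 private:
  std::mutex& m_;
};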
diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp
index 5de450c367a..099991f8414 100644
--- a/torch/csrc/utils/tensor_new.cpp
+++ b/torch/csrc/utils/tensor_new.cpp
@@ -853,6 +853,14 @@ class CheckSparseTensorInvariantsContext {
   ~CheckSparseTensorInvariantsContext() {
     at::globalContext().setCheckSparseTensorInvariants(state);
   }
+  CheckSparseTensorInvariantsContext(
+      const CheckSparseTensorInvariantsContext&) = delete;
+  CheckSparseTensorInvariantsContext(CheckSparseTensorInvariantsContext&&) =
+      delete;
+  CheckSparseTensorInvariantsContext& operator=(
+      const CheckSparseTensorInvariantsContext&) = delete;
+  CheckSparseTensorInvariantsContext& operator=(
+      CheckSparseTensorInvariantsContext&&) = delete;
 
  private:
   bool state;
diff --git a/torch/csrc/utils/torch_dispatch_mode.h b/torch/csrc/utils/torch_dispatch_mode.h
index 2eb8ba7a1cb..8fe5404b44a 100644
--- a/torch/csrc/utils/torch_dispatch_mode.h
+++ b/torch/csrc/utils/torch_dispatch_mode.h
@@ -27,6 +27,12 @@ struct StashTorchDispatchModeGuard {
         std::move(saved_mode_));
     }
   }
+  StashTorchDispatchModeGuard(const StashTorchDispatchModeGuard&) = delete;
+  StashTorchDispatchModeGuard(StashTorchDispatchModeGuard&&) = delete;
+  StashTorchDispatchModeGuard& operator=(const StashTorchDispatchModeGuard&) =
+      delete;
+  StashTorchDispatchModeGuard& operator=(StashTorchDispatchModeGuard&&) =
+      delete;
 
   const std::shared_ptr<c10::impl::PyObject_TorchDispatchMode>& get_cur_mode() {
     return saved_mode_;
@@ -44,6 +50,12 @@ struct StashTorchDispatchStackGuard {
     c10::impl::TorchDispatchModeTLS::set_state(std::move(saved_state_));
     saved_state_ = std::move(old);
   }
+  StashTorchDispatchStackGuard(const StashTorchDispatchStackGuard&) = delete;
+  StashTorchDispatchStackGuard(StashTorchDispatchStackGuard&&) = delete;
+  StashTorchDispatchStackGuard& operator=(const StashTorchDispatchStackGuard&) =
+      delete;
+  StashTorchDispatchStackGuard& operator=(StashTorchDispatchStackGuard&&) =
+      delete;
 
   ~StashTorchDispatchStackGuard() {
     c10::impl::TorchDispatchModeTLS::set_state(std::move(saved_state_));
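The last two files are the same guard pattern applied to save-and-restore state: the constructor stashes the current global or thread-local value and the destructor puts it back, so copies and moves are deleted to guarantee the restore runs exactly once. A minimal sketch with a hypothetical flag; the names are illustrative, not from the tree:

struct FlagGuard {
  explicit FlagGuard(bool& flag, bool new_value)
      : flag_(flag), saved_(flag) {
    flag_ = new_value;  // install the new state
  }
  FlagGuard(const FlagGuard&) = delete;
  FlagGuard(FlagGuard&&) = delete;
  FlagGuard& operator=(const FlagGuard&) = delete;
  FlagGuard& operator=(FlagGuard&&) = delete;
  ~FlagGuard() {
    flag_ = saved_;  // restore the saved state exactly once
  }

 private:
  bool& flag_;
  bool saved_;
};

Usage mirrors CheckSparseTensorInvariantsContext: construct the guard at the top of a scope, and the prior state is restored on every exit path, including exceptions.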