[BE]: Mark more hash impls as noexcept for efficiency (#171388)
Marking these hash implementations as noexcept allows STL containers to recompute the hash as needed, which lets them use more efficient internal layouts. Most of them either have a trivial hash implementation or already cache their hash value.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/171388
Approved by: https://github.com/Lucaskabela, https://github.com/drisspg
commit 0db3b1eee4 (parent 8bb42222f3)
committed by PyTorch MergeBot
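Background on why noexcept helps (context added here, not part of the original commit message): some standard-library implementations, libstdc++ among them, take the hasher's nothrow-invocability into account when deciding whether to cache each element's hash code inside the container node; a noexcept hash therefore lets the container keep nodes smaller and recompute the hash cheaply on rehash. The sketch below uses a hypothetical Scope enum standing in for types like at::RecordScope, and the static_assert checks the property the commit relies on.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <type_traits>
#include <unordered_set>

// Hypothetical enum standing in for at::RecordScope / c10::DispatchKey.
enum class Scope : std::uint8_t { FUNCTION, BACKWARD_FUNCTION };

namespace std {
template <>
struct hash<Scope> {
  // noexcept lets containers recompute the hash instead of caching it per node.
  size_t operator()(Scope s) const noexcept {
    return static_cast<size_t>(s);
  }
};
} // namespace std

// Verify the hasher is nothrow-invocable, the trait container policies can inspect.
static_assert(std::is_nothrow_invocable_v<std::hash<Scope>, Scope>,
              "hash<Scope> should be noexcept");

int main() {
  std::unordered_set<Scope> scopes{Scope::FUNCTION};
  return scopes.count(Scope::FUNCTION) == 1 ? 0 : 1;
}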
@@ -50,7 +50,7 @@ enum class C10_API_ENUM RecordScope : uint8_t {
 namespace std {
 template <>
 struct hash<at::RecordScope> {
-  size_t operator()(const at::RecordScope& sc) const {
+  size_t operator()(const at::RecordScope& sc) const noexcept {
     return static_cast<std::size_t>(sc);
   }
 };
@@ -738,7 +738,7 @@ struct hash<c10::DispatchKey> {
   typedef size_t result_type;
   typedef c10::DispatchKey argument_type;
 
-  size_t operator()(c10::DispatchKey x) const {
+  size_t operator()(c10::DispatchKey x) const noexcept {
     return static_cast<size_t>(x);
   }
 };
@@ -1101,7 +1101,7 @@ class NativeOpSchema {
         comparison_key_ == rhs.comparison_key_;
   }
 
-  std::size_t hash() const {
+  std::size_t hash() const noexcept {
     return hash_;
   }
 
@@ -1131,7 +1131,7 @@ class NativeOpSchema {
 namespace std {
 template <>
 struct hash<NativeOpSchema> {
-  std::size_t operator()(const NativeOpSchema& schema) const {
+  std::size_t operator()(const NativeOpSchema& schema) const noexcept {
     return schema.hash();
   }
 };
@@ -144,7 +144,7 @@ struct CacheKey {
         std::memcmp(key, other.key, key_size) == 0;
   }
 
-  size_t hash() const {
+  size_t hash() const noexcept {
     // don't bother hashing the key data, common case 1 cache entry per node
     return std::hash<std::type_index>()(node_type) ^ key_size;
   }
@@ -1555,7 +1555,7 @@ struct PackedArgs {
 
 template <>
 struct std::hash<torch::dynamo::autograd::CacheKey> {
-  size_t operator()(const torch::dynamo::autograd::CacheKey& k) const {
+  size_t operator()(const torch::dynamo::autograd::CacheKey& k) const noexcept {
     return k.hash();
   }
 };
@@ -153,7 +153,7 @@ struct ArgumentSpec {
   bool isPresent(size_t i) const {
     return optional_presence[i];
   }
-  size_t hashCode() const {
+  size_t hashCode() const noexcept {
     return hash_code;
   }
 
@@ -308,7 +308,7 @@ struct CompleteArgumentSpec {
   size_t size() const {
     return ninputs;
   }
-  size_t hashCode() const {
+  size_t hashCode() const noexcept {
     return hash_code;
   }
 
@@ -488,13 +488,14 @@ struct hash<c10::TensorType> {
 
 template <>
 struct hash<torch::jit::ArgumentSpec> {
-  size_t operator()(const torch::jit::ArgumentSpec& spec) const {
+  size_t operator()(const torch::jit::ArgumentSpec& spec) const noexcept {
     return spec.hashCode();
   }
 };
 template <>
 struct hash<torch::jit::CompleteArgumentSpec> {
-  size_t operator()(const torch::jit::CompleteArgumentSpec& spec) const {
+  size_t operator()(
+      const torch::jit::CompleteArgumentSpec& spec) const noexcept {
     return spec.hashCode();
   }
 };
@@ -27,7 +27,8 @@ struct TORCH_API SimplifierHashType {
 namespace std {
 template <>
 struct hash<torch::jit::tensorexpr::SimplifierHashType> {
-  size_t operator()(const torch::jit::tensorexpr::SimplifierHashType& k) const {
+  size_t operator()(
+      const torch::jit::tensorexpr::SimplifierHashType& k) const noexcept {
     return k._h;
   }
 };