Remove useless super() delegation (#167791)
This PR removes useless super() delegations detected by pylint.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/167791
Approved by: https://github.com/albanD
Committed by: PyTorch MergeBot
Parent: bc60b86066
Commit: de0d69b2c4
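For context on the pattern being removed: a "useless" super() delegation is an `__init__` (or other method) that only forwards its arguments unchanged to the parent class, so deleting it leaves behavior intact because the parent `__init__` is simply inherited. Below is a minimal illustrative sketch of my own (the `Base`/`Child` classes are hypothetical, not taken from this diff):

```python
class Base:
    def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None:
        self.reduction = reduction


class Child(Base):
    # Useless delegation: same signature, same arguments, nothing added.
    def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None:
        super().__init__(size_average, reduce, reduction)


class ChildFixed(Base):
    # After removing the delegating __init__, Base.__init__ is inherited
    # and still runs with the same arguments, so behavior is unchanged.
    pass


assert Child(reduction="sum").reduction == ChildFixed(reduction="sum").reduction == "sum"
```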
@@ -123,9 +123,6 @@ class L1Loss(_Loss):
 
     __constants__ = ["reduction"]
 
-    def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None:
-        super().__init__(size_average, reduce, reduction)
-
     def forward(self, input: Tensor, target: Tensor) -> Tensor:
         """
         Runs the forward pass.
@@ -623,9 +620,6 @@ class MSELoss(_Loss):
 
     __constants__ = ["reduction"]
 
-    def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None:
-        super().__init__(size_average, reduce, reduction)
-
     def forward(self, input: Tensor, target: Tensor) -> Tensor:
         """
         Runs the forward pass.
@@ -710,15 +704,6 @@ class BCELoss(_WeightedLoss):
 
     __constants__ = ["reduction"]
 
-    def __init__(
-        self,
-        weight: Optional[Tensor] = None,
-        size_average=None,
-        reduce=None,
-        reduction: str = "mean",
-    ) -> None:
-        super().__init__(weight, size_average, reduce, reduction)
-
     def forward(self, input: Tensor, target: Tensor) -> Tensor:
         """
         Runs the forward pass.
@@ -983,9 +968,6 @@ class MultiLabelMarginLoss(_Loss):
 
     __constants__ = ["reduction"]
 
-    def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None:
-        super().__init__(size_average, reduce, reduction)
-
     def forward(self, input: Tensor, target: Tensor) -> Tensor:
         """Runs the forward pass."""
         return F.multilabel_margin_loss(input, target, reduction=self.reduction)
@@ -1173,9 +1155,6 @@ class SoftMarginLoss(_Loss):
 
     __constants__ = ["reduction"]
 
-    def __init__(self, size_average=None, reduce=None, reduction: str = "mean") -> None:
-        super().__init__(size_average, reduce, reduction)
-
     def forward(self, input: Tensor, target: Tensor) -> Tensor:
         """Runs the forward pass."""
         return F.soft_margin_loss(input, target, reduction=self.reduction)
@@ -1432,15 +1411,6 @@ class MultiLabelSoftMarginLoss(_WeightedLoss):
 
     __constants__ = ["reduction"]
 
-    def __init__(
-        self,
-        weight: Optional[Tensor] = None,
-        size_average=None,
-        reduce=None,
-        reduction: str = "mean",
-    ) -> None:
-        super().__init__(weight, size_average, reduce, reduction)
-
     def forward(self, input: Tensor, target: Tensor) -> Tensor:
         """Runs the forward pass."""
         return F.multilabel_soft_margin_loss(
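As a sanity check (a usage sketch of my own, not part of the commit), call sites for the affected loss modules are unchanged; the constructor arguments are now handled by the inherited `_Loss.__init__` / `_WeightedLoss.__init__`:

```python
import torch
import torch.nn as nn

# Construction is identical before and after the change: the arguments are
# consumed by the inherited base-class __init__.
l1 = nn.L1Loss(reduction="sum")
bce = nn.BCELoss(weight=torch.ones(3), reduction="mean")

pred = torch.rand(2, 3)
target = torch.rand(2, 3)
print(l1(pred, target), bce(pred, target))
```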
@@ -9,9 +9,6 @@ import torch.distributed.distributed_c10d as c10d
 
 
 class MockProcessGroup(dist.ProcessGroup):
-    def __init__(self, rank, world):
-        super().__init__(rank, world)
-
     def getBackendName(self):
         return "mock_process_group"
 