neuralchen-SimSwap/Conv2d.patch

--- /usr/local/lib/python3.5/dist-packages/torch/nn/modules/conv.py
+++ /usr/local/lib/python3.5/dist-packages/torch/nn/modules/conv.py
@@ -15,8 +15,6 @@
:math:`N` is a batch size, :math:`C` denotes a number of channels,
:math:`H` is a height of input planes in pixels, and :math:`W` is
width in pixels.
-
- This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
* :attr:`stride` controls the stride for the cross-correlation, a single
number or a tuple.
@@ -39,7 +37,7 @@
concatenated.
* At groups= :attr:`in_channels`, each input channel is convolved with
its own set of filters, of size:
- :math:`\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor`.
+ :math:`\left\lfloor\frac{C_\text{out}}{C_\text{in}}\right\rfloor`.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
@@ -47,14 +45,14 @@
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
- Note:
+ .. note::
Depending of the size of your kernel, several (of the last)
columns of the input might be lost, because it is a valid `cross-correlation`_,
and not a full `cross-correlation`_.
It is up to the user to add proper padding.
- Note:
+ .. note::
When `groups == in_channels` and `out_channels == K * in_channels`,
where `K` is a positive integer, this operation is also termed in
@@ -64,29 +62,17 @@
a depthwise convolution with a depthwise multiplier `K`, can be constructed by arguments
:math:`(in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})`.
- Note:
- In some circumstances when using the CUDA backend with CuDNN, this operator
- may select a nondeterministic algorithm to increase performance. If this is
- undesirable, you can try to make the operation deterministic (potentially at
- a performance cost) by setting ``torch.backends.cudnn.deterministic =
- True``.
- Please see the notes on :doc:`/notes/randomness` for background.
-
+ .. include:: cudnn_deterministic.rst
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
- padding (int or tuple, optional): Zero-padding added to both sides of
- the input. Default: 0
- padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
- ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
+ padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
- groups (int, optional): Number of blocked connections from input
- channels to output channels. Default: 1
- bias (bool, optional): If ``True``, adds a learnable bias to the
- output. Default: ``True``
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
@@ -102,18 +88,16 @@
Attributes:
weight (Tensor): the learnable weights of the module of shape
- :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
- :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
- The values of these weights are sampled from
- :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
- :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
- bias (Tensor): the learnable bias of the module of shape
- (out_channels). If :attr:`bias` is ``True``,
- then the values of these weights are
- sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
- :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
+ (out_channels, in_channels, kernel_size[0], kernel_size[1]).
+ The values of these weights are sampled from
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+ :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
+ bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
+ then the values of these weights are
+ sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+ :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
- Examples:
+ Examples::
>>> # With square kernels and equal stride
>>> m = nn.Conv2d(16, 33, 3, stride=2)
@@ -130,34 +114,18 @@
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
- def __init__(
- self,
- in_channels: int,
- out_channels: int,
- kernel_size: _size_2_t,
- stride: _size_2_t = 1,
- padding: _size_2_t = 0,
- dilation: _size_2_t = 1,
- groups: int = 1,
- bias: bool = True,
- padding_mode: str = 'zeros' # TODO: refine this type
- ):
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+ padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
- False, _pair(0), groups, bias, padding_mode)
+ False, _pair(0), groups, bias)
- def _conv_forward(self, input, weight):
- if self.padding_mode != 'zeros':
- return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
- weight, self.bias, self.stride,
- _pair(0), self.dilation, self.groups)
- return F.conv2d(input, weight, self.bias, self.stride,
+ @weak_script_method
+ def forward(self, input):
+ return F.conv2d(input, self.weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
- def forward(self, input: Tensor) -> Tensor:
- return self._conv_forward(input, self.weight)
-
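
Below is a short illustration (not part of the patch itself) of the depthwise-convolution behaviour described in the docstring hunks above: with in_channels=C_in, out_channels=C_in * K and groups=C_in, each input channel is convolved with its own set of K filters. It is a minimal sketch assuming only that torch is importable; the channel counts and tensor sizes are arbitrary example values.

    import torch
    import torch.nn as nn

    # Depthwise convolution as described in the docstring:
    # in_channels = C_in, out_channels = C_in * K, groups = C_in.
    C_in, K = 8, 3
    depthwise = nn.Conv2d(in_channels=C_in, out_channels=C_in * K,
                          kernel_size=3, padding=1, groups=C_in)

    x = torch.randn(1, C_in, 32, 32)
    y = depthwise(x)

    # Each input channel is convolved with its own K filters.
    print(y.shape)                 # torch.Size([1, 24, 32, 32])
    # Each filter sees in_channels // groups (= 1) input planes,
    # matching the floor(C_out / C_in) filters-per-channel wording above.
    print(depthwise.weight.shape)  # torch.Size([24, 1, 3, 3])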
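A second sketch, also outside the patch, for the initialisation formula in the Attributes hunk: weight and bias values are sampled from U(-sqrt(k), sqrt(k)), where the restored wording gives k = 1 / (C_in * prod(kernel_size)) (the removed wording scales k by groups). The concrete layer sizes are illustrative assumptions, and the bound check is written for the groups=1 case, where both formulas coincide.

    import math
    import torch.nn as nn

    C_in, C_out, kernel = 16, 33, (3, 5)
    m = nn.Conv2d(C_in, C_out, kernel)

    # k = 1 / (C_in * kernel_size[0] * kernel_size[1]) for groups = 1,
    # so weight and bias are drawn from U(-sqrt(k), sqrt(k)).
    k = 1.0 / (C_in * kernel[0] * kernel[1])
    bound = math.sqrt(k)
    print(bound)                                 # ~0.0645
    print(m.weight.abs().max().item() <= bound)  # True
    print(m.bias.abs().max().item() <= bound)    # True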