
Commit db65909

[Docs] Update mm family ops and F.linear to note limited sparse support. (pytorch#86220)

Pull Request resolved: pytorch#86220
Approved by: https://github.com/cpuhrsch
amjames authored and pytorchmergebot committed Oct 18, 2022
1 parent a73ca6f commit db65909
Showing 2 changed files with 35 additions and 5 deletions.
32 changes: 29 additions & 3 deletions torch/_torch_docs.py
@@ -166,6 +166,14 @@ def merge_dicts(*dicts):
See :doc:`/notes/randomness` for more information.""",
}

sparse_support_notes = {
"sparse_beta_warning": """
.. warning::
Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
or may not have autograd support. If you notice missing functionality please
open a feature request.""",
}
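(For context, this new dict follows the same pattern as the existing `tf32_notes` and `rocm_fp16_notes`: each docstring carries a `{sparse_beta_warning}` placeholder that `str.format` fills in. A minimal standalone sketch of that mechanism, not the actual `_torch_docs.py` code:)

```python
# Sketch of how the new note is spliced into docstrings, mirroring the
# ``.format(**sparse_support_notes)`` calls added in this diff.
sparse_support_notes = {
    "sparse_beta_warning": """
.. warning::
    Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be
    supported, or may not have autograd support. If you notice missing functionality please
    open a feature request.""",
}

doc_template = r"""
mm(input, mat2, *, out=None) -> Tensor

This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`.
{sparse_beta_warning}
"""

# The placeholder is replaced by the shared warning text.
print(doc_template.format(**sparse_support_notes))
```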

add_docstr(
torch.abs,
r"""
@@ -534,6 +542,12 @@ def merge_dicts(*dicts):
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
:attr:`input` is sparse, the result will have the same layout, and if :attr:`out`
is provided it must have the same layout as :attr:`input`.
{sparse_beta_warning}
{tf32_note}
{rocm_fp16_note}
@@ -557,7 +571,7 @@ def merge_dicts(*dicts):
tensor([[-4.8716, 1.4671, -1.3746],
[ 0.7573, -3.9555, -2.8681]])
""".format(
**common_args, **tf32_notes, **rocm_fp16_notes
**common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
),
)
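(A hedged usage sketch of the addmm behavior described above; which layout/dtype/device combinations actually work varies by build, which is exactly what the beta warning covers, so the call is guarded:)

```python
import torch

inp = torch.randn(2, 3)
mat1 = torch.randn(2, 4)
mat2 = torch.randn(4, 3)

# Per the note: if ``input`` is sparse, the result keeps its layout (CSR is
# used here as one plausible sparse layout; support is a beta feature).
inp_sparse = inp.to_sparse_csr()
try:
    out = torch.addmm(inp_sparse, mat1, mat2, beta=1.0, alpha=1.0)
    print(out.layout)  # expected to match ``input``, i.e. torch.sparse_csr
except RuntimeError as err:
    print("unsupported layout/dtype/device combination:", err)
```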

@@ -7462,6 +7476,12 @@ def merge_dicts(*dicts):
Supports strided and sparse 2-D tensors as inputs, autograd with
respect to strided inputs.
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`.
If :attr:`out` is provided its layout will be used. Otherwise, the result
layout will be deduced from that of :attr:`input`.
{sparse_beta_warning}
{tf32_note}
{rocm_fp16_note}
@@ -7481,7 +7501,7 @@ def merge_dicts(*dicts):
tensor([[ 0.4851, 0.5037, -0.3633],
[-0.0760, -3.6705, 2.4784]])
""".format(
**common_args, **tf32_notes, **rocm_fp16_notes
**common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
),
)
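(A sketch of the mm semantics described above, i.e. the result layout being deduced from `input` when no `out` is given; guarded because supported combinations vary:)

```python
import torch

a = torch.randn(2, 4).to_sparse()  # COO, one possible sparse layout
b = torch.randn(4, 3)              # strided

try:
    r = torch.mm(a, b)
    # With no ``out``, the result layout is deduced from ``input`` (here ``a``).
    print(r.layout)
except RuntimeError as err:
    print("unsupported layout/dtype/device combination:", err)
```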

@@ -7538,6 +7558,12 @@ def merge_dicts(*dicts):
tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. In particular, the
matrix-matrix case (both arguments 2-dimensional) supports sparse arguments with the same restrictions
as :func:`torch.mm`.
{sparse_beta_warning}
{tf32_note}
{rocm_fp16_note}
@@ -7582,7 +7608,7 @@ def merge_dicts(*dicts):
torch.Size([10, 3, 5])
""".format(
**common_args, **tf32_notes, **rocm_fp16_notes
**common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes
),
)
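(And for matmul, a sketch of the 2-D/2-D "matrix-matrix" case, which per the note follows the same sparse restrictions as torch.mm; guarded for the same reason:)

```python
import torch

a = torch.randn(3, 4).to_sparse()  # 2-D sparse argument
b = torch.randn(4, 5)              # 2-D strided argument

try:
    # Matrix-matrix case of matmul: same sparse restrictions as torch.mm.
    print(torch.matmul(a, b).shape)  # torch.Size([3, 5])
except RuntimeError as err:
    print("unsupported layout/dtype/device combination:", err)
```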

8 changes: 6 additions & 2 deletions torch/nn/functional.py
@@ -6,7 +6,7 @@
import torch
from torch import _VF
from torch._C import _infer_size, _add_docstr
from torch._torch_docs import reproducibility_notes, tf32_notes
from torch._torch_docs import reproducibility_notes, tf32_notes, sparse_support_notes
# A workaround to support both TorchScript and MyPy:
from typing import TYPE_CHECKING
if TYPE_CHECKING:
@@ -1997,6 +1997,10 @@ def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:
Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
This operation supports 2-D :attr:`weight` with :ref:`sparse layout<sparse-docs>`.
{sparse_beta_warning}
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
Shape:
@@ -2006,7 +2010,7 @@ def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:
- Weight: :math:`(out\_features, in\_features)` or :math:`(in\_features)`
- Bias: :math:`(out\_features)` or :math:`()`
- Output: :math:`(*, out\_features)` or :math:`(*)`, based on the shape of the weight
""")
""".format(**sparse_support_notes))


bilinear = _add_docstr(
