Search notes:

Python library: torch (PyTorch)

pip3 install torch
pip3 install torch torchvision
pip3 install torch torchvision torchaudio

Members

abs()
abs_()
absolute()
acos()
acos_()
acosh()
acosh_()
adaptive_avg_pool1d()
_adaptive_avg_pool2d()
_adaptive_avg_pool3d()
adaptive_max_pool1d()
add()
_add_batch_dim()
addbmm()
addcdiv()
addcmul()
addmm()
_addmm_activation()
addmv()
addmv_()
addr()
_add_relu()
_add_relu_()
adjoint()
affine_grid_generator()
AggregationType ?
alias_copy()
AliasDb ?
align_tensors()
all()
allclose()
alpha_dropout()
alpha_dropout_()
amax()
amin()
_aminmax()
aminmax()
amp Module
_amp_foreach_non_finite_check_and_unscale_()
_amp_update_scale_()
angle()
Any ?
any()
AnyType ?
ao Module
arange()
arccos()
arccos_()
arccosh()
arccosh_()
arcsin()
arcsin_()
arcsinh()
arcsinh_()
arctan()
arctan_()
arctan2()
arctanh()
arctanh_()
are_deterministic_algorithms_enabled()
argmax()
argmin()
argsort()
Argument ?
ArgumentSpec ?
argwhere()
asarray()
asin()
asin_()
asinh()
asinh_()
_assert()
_assert_async()
_assert_tensor_metadata()
as_strided()
as_strided_()
as_strided_copy()
as_strided_scatter()
as_tensor()
atan()
atan_()
atan2()
atanh()
atanh_()
atleast_1d()
atleast_2d()
atleast_3d()
attr str object
autocast torch.amp.autocast_mode.autocast class
autocast_decrement_nesting()
autocast_increment_nesting()
autograd Module
AVG ?
avg_pool1d()
_awaits Module
AwaitType ?
backends Module
baddbmm()
bartlett_window()
batch_norm()
batch_norm_backward_elemt()
batch_norm_backward_reduce()
batch_norm_elemt()
batch_norm_gather_stats()
batch_norm_gather_stats_with_counts()
_batch_norm_impl_index()
batch_norm_stats()
batch_norm_update_stats()
BenchmarkConfig ?
BenchmarkExecutionStats ?
bernoulli()
bfloat16 ?
BFloat16Storage ?
BFloat16Tensor ?
bilinear()
binary_cross_entropy_with_logits()
bincount()
binomial()
bitwise_and()
bitwise_left_shift()
bitwise_not()
bitwise_or()
bitwise_right_shift()
bitwise_xor()
blackman_window()
Block ?
block_diag()
bmm() Batch matrix-matrix product. Compare with mm()
bool ?
BoolStorage ?
BoolTensor ?
BoolType ?
broadcast_shapes()
broadcast_tensors()
broadcast_to()
bucketize()
BufferDict ?
builtins Module
ByteStorage ?
ByteTensor ?
_C Module
Callable ?
CallStack ?
can_cast()
candidate()
Capsule ?
cartesian_prod()
_cast_Byte()
_cast_Char()
_cast_Double()
_cast_Float()
_cast_Half()
_cast_Int()
_cast_Long()
_cast_Short()
cat() Concatenate a sequence of tensors along a given dimension. Compare with stack() (example after this list)
ccol_indices_copy()
cdist()
cdouble ?
ceil()
ceil_()
celu()
celu_()
cfloat ?
chain_matmul()
chalf ?
channel_shuffle()
channels_last ?
channels_last_3d ?
CharStorage ?
CharTensor ?
cholesky()
cholesky_inverse()
cholesky_solve()
choose_qparams_optimized()
_choose_qparams_per_tensor()
chunk()
_chunk_grad_outputs_efficient_attention()
clamp()
clamp_()
clamp_max()
clamp_max_()
clamp_min()
clamp_min_()
_classes Module
classes Module
classproperty()
ClassType ?
clear_autocast_cache()
clip()
clip_()
clone()
_coalesce()
Code ?
col_indices_copy()
column_stack()
combinations()
CompilationUnit ?
compile()
compiled_with_cxx11_abi()
CompleteArgumentSpec ?
complex()
complex128 ?
complex32 ?
complex64 ?
ComplexDoubleStorage ?
ComplexFloatStorage ?
ComplexType ?
_compute_linear_combination()
concat()
concatenate()
ConcreteModuleType ?
ConcreteModuleTypeBuilder ?
_conj()
conj()
_conj_copy()
_conj_physical()
conj_physical()
conj_physical_()
constant_pad_nd()
contiguous_format ?
conv1d()
conv2d()
conv3d()
_convert_indices_from_coo_to_csr()
_convert_indices_from_csr_to_coo()
_convolution()
convolution()
_convolution_mode()
conv_tbc()
conv_transpose1d()
conv_transpose2d()
conv_transpose3d()
_copy_from()
_copy_from_and_resize()
copysign()
corrcoef()
cos()
cos_()
cosh()
cosh_()
cosine_embedding_loss()
cosine_similarity()
count_nonzero()
cov()
cpp Module
cpu Module
cross()
crow_indices_copy()
_ctc_loss()
ctc_loss()
ctypes Module
cuda Module
cudnn_affine_grid_generator()
cudnn_batch_norm()
cudnn_convolution()
cudnn_convolution_add_relu()
cudnn_convolution_relu()
cudnn_convolution_transpose()
_cudnn_ctc_loss()
cudnn_grid_sampler()
_cudnn_init_dropout_state()
cudnn_is_acceptable()
_cudnn_rnn()
_cudnn_rnn_flatten_weight()
_cufft_clear_plan_cache()
_cufft_get_plan_cache_max_size()
_cufft_get_plan_cache_size()
_cufft_set_plan_cache_max_size()
cummax()
_cummax_helper()
cummin()
_cummin_helper()
cumprod()
cumsum()
cumulative_trapezoid()
_debug_has_internal_overlap()
_decomp Module
DeepCopyMemoTable ?
default_generator ?
deg2rad()
deg2rad_()
dequantize()
DeserializationStorageContext ?
det()
detach()
detach_()
detach_copy()
device A torch.device object (cpu, cuda or mps) which represents the device on which the tensor is (or will be) allocated.
DeviceObjType ?
diag()
diag_embed()
diagflat()
diagonal()
diagonal_copy()
diagonal_scatter()
Dict ?
DictType ?
diff()
digamma()
_dim_arange()
_dirichlet_grad()
_disable_functionalization()
DisableTorchFunction torch._C.DisableTorchFunction class
DisableTorchFunctionSubclass torch._C.DisableTorchFunctionSubclass class
_dispatch Module
DispatchKey ?
DispatchKeySet ?
dist()
distributed Module
distributions Module
div()
divide()
dot()
double ?
DoubleStorage ?
DoubleTensor ?
dropout()
dropout_()
dsmm()
dsplit()
dstack()
dtype torch.dtype class
e float object
_efficientzerotensor()
eig()
einsum()
embedding()
_embedding_bag()
embedding_bag()
_embedding_bag_forward_only()
embedding_renorm_()
empty()
_empty_affine_quantized()
empty_like()
_empty_per_channel_affine_quantized()
empty_quantized()
empty_strided()
_enable_functionalization()
enable_grad torch.autograd.grad_mode.enable_grad class. Compare with no_grad, set_grad_enabled and inference_mode
EnumType ?
eq()
equal()
erf()
erf_()
erfc()
erfc_()
erfinv()
ErrorReport ?
_euclidean_dist()
ExcludeDispatchKeyGuard ?
ExecutionPlan ?
exp()
exp_()
exp2()
exp2_()
expand_copy()
expm1()
expm1_()
eye()
_fake_quantize_learnable_per_channel_affine()
_fake_quantize_learnable_per_tensor_affine()
fake_quantize_per_channel_affine()
fake_quantize_per_tensor_affine()
_fake_quantize_per_tensor_affine_cachemask_tensor_qparams()
FatalError torch.FatalError class
fbgemm_linear_fp16_weight()
fbgemm_linear_fp16_weight_fp32_activation()
fbgemm_linear_int8_weight()
fbgemm_linear_int8_weight_fp32_activation()
fbgemm_linear_quantize_weight()
fbgemm_pack_gemm_matrix_fp16()
fbgemm_pack_quantized_matrix()
feature_alpha_dropout()
feature_alpha_dropout_()
feature_dropout()
feature_dropout_()
fft Module
_fft_c2c()
_fft_c2r()
_fft_r2c()
FileCheck ?
fill()
fill_()
finfo torch.finfo class
fix()
fix_()
flatten()
flip()
fliplr()
flipud()
float ?
float16 ?
float32 ?
float64 ?
float_power()
FloatStorage ?
FloatTensor ?
FloatType ?
floor()
floor_()
floor_divide()
fmax()
fmin()
fmod()
_foobar()
_foreach_abs()
_foreach_abs_()
_foreach_acos()
_foreach_acos_()
_foreach_add()
_foreach_add_()
_foreach_addcdiv()
_foreach_addcdiv_()
_foreach_addcmul()
_foreach_addcmul_()
_foreach_asin()
_foreach_asin_()
_foreach_atan()
_foreach_atan_()
_foreach_ceil()
_foreach_ceil_()
_foreach_clamp_max()
_foreach_clamp_max_()
_foreach_clamp_min()
_foreach_clamp_min_()
_foreach_cos()
_foreach_cos_()
_foreach_cosh()
_foreach_cosh_()
_foreach_div()
_foreach_div_()
_foreach_erf()
_foreach_erf_()
_foreach_erfc()
_foreach_erfc_()
_foreach_exp()
_foreach_exp_()
_foreach_expm1()
_foreach_expm1_()
_foreach_floor()
_foreach_floor_()
_foreach_frac()
_foreach_frac_()
_foreach_lerp()
_foreach_lerp_()
_foreach_lgamma()
_foreach_lgamma_()
_foreach_log()
_foreach_log_()
_foreach_log10()
_foreach_log10_()
_foreach_log1p()
_foreach_log1p_()
_foreach_log2()
_foreach_log2_()
_foreach_maximum()
_foreach_maximum_()
_foreach_minimum()
_foreach_minimum_()
_foreach_mul()
_foreach_mul_()
_foreach_neg()
_foreach_neg_()
_foreach_norm()
_foreach_reciprocal()
_foreach_reciprocal_()
_foreach_round()
_foreach_round_()
_foreach_sigmoid()
_foreach_sigmoid_()
_foreach_sin()
_foreach_sin_()
_foreach_sinh()
_foreach_sinh_()
_foreach_sqrt()
_foreach_sqrt_()
_foreach_sub()
_foreach_sub_()
_foreach_tan()
_foreach_tan_()
_foreach_tanh()
_foreach_tanh_()
_foreach_trunc()
_foreach_trunc_()
_foreach_zero_()
fork()
frac()
frac_()
_freeze_functional_tensor()
frexp()
frobenius_norm()
frombuffer()
from_dlpack()
from_file()
_from_functional_tensor()
from_numpy() Compare with Tensor.numpy()
full()
full_like()
func Module
functional Module
FunctionSchema ?
_functorch Module
_fused_adam_()
_fused_adamw_()
_fused_dropout()
fused_moving_avg_obs_fake_quant()
_fused_moving_avg_obs_fq_helper()
_fused_sdp_choice()
Future ?
futures Module
FutureType ?
_fw_primal_copy()
fx Module
gather()
gcd()
gcd_()
ge()
Generator torch._C.Generator class
geqrf()
ger()
get_autocast_cpu_dtype()
get_autocast_gpu_dtype()
get_default_dtype()
get_deterministic_debug_mode()
get_device()
get_file_path()
get_float32_matmul_precision()
get_num_interop_threads()
get_num_threads()
get_rng_state()
_GLOBAL_DEVICE_CONTEXT NoneType object
Gradient ?
gradient()
Graph ?
GraphExecutorState ?
greater()
greater_equal()
grid_sampler()
grid_sampler_2d()
_grid_sampler_2d_cpu_fallback()
grid_sampler_3d()
group_norm()
gru()
gru_cell()
gt()
_guards Module
half ?
HalfStorage ?
HalfTensor ?
hamming_window()
hann_window()
hardshrink()
_has_compatible_shallow_copy_type()
has_cuda bool object
has_cudnn bool object
has_lapack bool object
has_mkl bool object
has_mkldnn bool object
has_mps bool object
has_openmp bool object
has_spectral bool object
heaviside()
hinge_embedding_loss()
histc()
histogram()
histogramdd()
_histogramdd_bin_edges()
_histogramdd_from_bin_cts()
_histogramdd_from_bin_tensors()
hsmm()
hsplit()
hspmm()
hstack()
hub Module
hypot()
i0()
i0_()
igamma()
igammac()
iinfo torch.iinfo class
imag()
_import_dotted_name()
import_ir_module()
import_ir_module_from_buffer()
index_add()
index_copy()
index_fill()
index_put()
index_put_()
_index_put_impl_()
index_reduce()
index_select()
_indices_copy()
indices_copy()
inf float object
inference_mode torch.autograd.grad_mode.inference_mode class. Compare with enable_grad, no_grad and set_grad_enabled.
InferredType ?
_initExtension()
initial_seed()
init_num_threads()
inner()
inspect Module
instance_norm()
int ?
int16 ?
int32 ?
int64 ?
int8 ?
InterfaceType ?
int_repr()
IntStorage ?
IntTensor ?
IntType ?
inverse()
IODescriptor ?
_is_all_true()
is_anomaly_check_nan_enabled()
is_anomaly_enabled()
_is_any_true()
is_autocast_cache_enabled()
is_autocast_cpu_enabled()
is_autocast_enabled()
isclose()
is_complex()
is_conj()
is_deterministic_algorithms_warn_only_enabled()
is_distributed()
isfinite()
is_floating_point()
_is_functional_tensor()
is_grad_enabled()
isin()
isinf()
is_inference()
is_inference_mode_enabled()
isnan()
is_neg()
isneginf()
is_nonzero()
isposinf()
isreal()
is_same_size()
is_signed()
is_storage()
is_tensor()
istft()
is_vulkan_available()
is_warn_always_enabled()
_is_zerotensor()
jit Module
JITException torch.jit.Error class
_jit_internal Module
kaiser_window()
kl_div()
kron()
kthvalue()
layer_norm()
layout torch.layout class
lcm()
lcm_()
ldexp()
ldexp_()
le()
legacy_contiguous_format ?
lerp()
less()
less_equal()
lgamma()
library Module
linalg Module
_linalg_check_errors()
_linalg_det()
_linalg_eigh()
_linalg_slogdet()
_linalg_solve_ex()
_linalg_svd()
_linalg_utils Module
linspace()
ListType ?
LiteScriptModule ?
load() Loads an object from a file that was saved with torch.save()
_load_global_deps()
_lobpcg Module
lobpcg()
LockingLogger ?
log()
log_()
log10()
log10_()
log1p()
log1p_()
log2()
log2_()
logaddexp()
logaddexp2()
_logcumsumexp()
logcumsumexp()
logdet()
LoggerBase ?
logical_and()
logical_not()
logical_or()
logical_xor()
logit()
logit_()
_log_softmax()
log_softmax()
_log_softmax_backward_data()
logspace()
logsumexp()
long ?
LongStorage ?
LongTensor ?
_lowrank Module
lstm()
lstm_cell()
_lstm_mps()
lstsq()
lt()
lu()
lu_solve()
lu_unpack()
_lu_with_info()
_make_dual()
_make_dual_copy()
_make_per_channel_quantized_tensor()
_make_per_tensor_quantized_tensor()
manual_seed() Seeds the random number generator and returns a torch.Generator object. See also David Picard: torch.manual_seed(3407) is all you need: On the influence of random seeds in deep learning architectures for computer vision.
margin_ranking_loss()
masked Module
masked_fill()
_masked_scale()
masked_scatter()
masked_select()
_masked_softmax()
math Module
matmul() Matrix multiplication, dot product or matrix-vector product, depending on the input shapes, with broadcasting. Compare with mm()
matrix_exp()
matrix_power()
matrix_rank()
max()
maximum()
max_pool1d()
max_pool1d_with_indices()
max_pool2d()
max_pool3d()
mean()
median()
memory_format torch.memory_format class
merge_type_from_type_comment()
meshgrid()
_meta_registrations Module
min()
minimum()
miopen_batch_norm()
miopen_convolution()
miopen_convolution_add_relu()
miopen_convolution_relu()
miopen_convolution_transpose()
miopen_depthwise_convolution()
miopen_rnn()
_mkldnn ?
mkldnn_adaptive_avg_pool2d()
mkldnn_convolution()
mkldnn_linear_backward_weights()
mkldnn_max_pool2d()
mkldnn_max_pool3d()
_mkldnn_reshape()
mkldnn_rnn_layer()
_mkldnn_transpose()
_mkldnn_transpose_()
mm() Matrix multiplication (no broadcasting). Compare with matmul(), smm() and bmm() (example after this list)
mode()
ModuleDict ?
moveaxis()
movedim()
_mps_convolution()
_mps_convolution_transpose()
msort()
mul()
multinomial()
multiply()
multiprocessing Module
mv()
mvlgamma()
name str object
_namedtensor_internals Module
nan float object
nanmean()
nanmedian()
nanquantile()
nansum()
nan_to_num()
nan_to_num_()
narrow()
narrow_copy()
native_batch_norm()
_native_batch_norm_legit()
native_channel_shuffle()
_native_decoder_only_multi_head_attention()
native_dropout()
native_group_norm()
native_layer_norm()
_native_multi_head_attention()
native_norm()
ne()
neg()
neg_()
negative()
negative_()
_neg_view()
_neg_view_copy()
nested Module
_nested_from_padded()
_nested_from_padded_and_nested_example()
_nested_tensor_from_mask()
_nested_tensor_from_mask_left_aligned()
_nested_tensor_from_tensor_list()
_nested_tensor_softmax_with_shape()
nextafter()
nn A module which contains, for example, torch.nn.Parameter, torch.nn.Linear, torch.nn.MSELoss and torch.nn.Module
_nnpack_available()
_nnpack_spatial_convolution()
Node ?
no_grad torch.autograd.grad_mode.no_grad class; can be used as a context manager or as a decorator. See also enable_grad, set_grad_enabled and inference_mode
NoneType ?
nonzero()
NoopLogger ?
norm()
normal()
norm_except_dim()
not_equal()
nuclear_norm()
NumberType ?
numel()
obj()
ones() Create a tensor with a given shape, filled with the value 1. Compare with zeros()
ones_like()
onnx Module
OperatorInfo ?
_ops Module
ops Module
optim Module
Optional ?
OptionalType ?
orgqr()
ormqr()
os Module
outer()
overrides Module
package Module
_pack_padded_sequence()
_pad_packed_sequence()
pairwise_distance()
ParameterDict ?
parse_ir()
parse_schema()
parse_type_comment()
pca_lowrank()
pdist()
per_channel_affine ?
per_channel_affine_float_qparams ?
per_channel_symmetric ?
permute()
permute_copy()
per_tensor_affine ?
per_tensor_symmetric ?
pi float object
_pin_memory()
pinverse()
pixel_shuffle()
pixel_unshuffle()
platform Module
poisson()
poisson_nll_loss()
polar()
polygamma()
positive()
pow()
_preload_cuda_deps()
prelu()
_prelu_kernel()
prepare_multiprocessing_environment()
preserve_format ?
_prims Module
_prims_common Module
PRIVATE_OPS tuple object
prod()
profiler Module
promote_types()
put()
py_float float class
py_int int class
PyObjectType ?
PyTorchFileReader ?
PyTorchFileWriter ?
qint32 ?
QInt32Storage ?
qint8 ?
QInt8Storage ?
q_per_channel_axis()
q_per_channel_scales()
q_per_channel_zero_points()
qr()
q_scale()
qscheme torch.qscheme class
quantile()
quantization Module
quantized_batch_norm()
quantized_gru ?
quantized_gru_cell()
quantized_lstm ?
quantized_lstm_cell()
quantized_max_pool1d()
quantized_max_pool2d()
quantized_rnn_relu_cell()
quantized_rnn_tanh_cell()
quantize_per_channel()
quantize_per_tensor()
quantize_per_tensor_dynamic()
quasirandom Module
quint2x4 ?
QUInt2x4Storage ?
quint4x2 ?
QUInt4x2Storage ?
quint8 ?
QUInt8Storage ?
q_zero_point()
rad2deg()
rad2deg_()
random Module
randperm() Random permutation of the integers 0 … n-1
range()
ravel()
read_vitals()
real()
reciprocal()
reciprocal_()
_refs Module
_register_device_module()
relu()
relu_()
remainder()
_remove_batch_dim()
renorm()
repeat_interleave()
reshape()
_reshape_alias_copy()
_reshape_from_tensor()
resize_as_()
resize_as_sparse_()
_resize_output_()
resolve_conj()
resolve_neg()
result_type()
return_types Module
rnn_relu()
rnn_relu_cell()
rnn_tanh()
rnn_tanh_cell()
roll()
rot90()
round()
round_()
row_indices_copy()
row_stack()
_rowwise_prune()
RRefType ?
rrelu()
rrelu_()
rsqrt()
rsqrt_()
rsub()
saddmm()
_sample_dirichlet()
_saturate_weight_to_fp16()
save() Saves an object to a file (typically with a .pt extension; .pth is also seen but less desirable) using pickle. The object can then be loaded again with torch.load(). Compare with torch.nn.Module.load_state_dict(). (Example after this list.)
scalar_tensor()
_scaled_dot_product_attention_math()
_scaled_dot_product_efficient_attention()
_scaled_dot_product_flash_attention()
scatter()
scatter_add()
scatter_reduce()
ScriptClass ?
ScriptClassFunction ?
ScriptDict ?
ScriptDictIterator ?
ScriptDictKeyIterator ?
ScriptFunction ?
ScriptList ?
ScriptListIterator ?
ScriptMethod ?
ScriptModule ?
ScriptModuleSerializer ?
ScriptObject ?
ScriptObjectProperty ?
searchsorted()
seed()
_segment_reduce()
segment_reduce()
select()
select_copy()
select_scatter()
selu()
selu_()
serialization Module
SerializationStorageContext ?
Set ?
set_anomaly_enabled()
set_autocast_cache_enabled()
set_autocast_cpu_dtype()
set_autocast_cpu_enabled()
set_autocast_enabled()
set_autocast_gpu_dtype()
set_default_device()
set_default_dtype()
set_default_tensor_type()
set_deterministic_debug_mode()
set_float32_matmul_precision()
set_flush_denormal()
set_grad_enabled torch.autograd.grad_mode.set_grad_enabled class. Compare with enable_grad, no_grad and inference_mode
set_num_interop_threads()
set_num_threads()
set_printoptions() Set options that influence representations of data and structures when printing objects: precision, threshold, edgeitems, linewidth, profile, sci_mode.
set_rng_state()
set_vital()
set_warn_always()
sgn()
_shape_as_tensor()
short ?
ShortStorage ?
ShortTensor ?
sigmoid() An alias for torch.special.expit()
sigmoid_()
sign()
signal Module
signbit()
sin()
sin_()
sinc()
sinc_()
sinh()
sinh_()
Size torch.Size class
slice_copy()
slice_scatter()
slogdet()
smm() Matrix multiplication of a sparse and a dense matrix. Compare with mm()
_sobol_engine_draw()
_sobol_engine_ff_()
_sobol_engine_initialize_state_()
_sobol_engine_scramble_()
_softmax()
softmax()
_softmax_backward_data()
solve()
sort()
_sources Module
sparse Module
_sparse_broadcast_to()
_sparse_broadcast_to_copy()
sparse_bsc ?
sparse_bsc_tensor()
sparse_bsr ?
sparse_bsr_tensor()
sparse_compressed_tensor()
sparse_coo ?
sparse_coo_tensor()
_sparse_coo_tensor_unsafe()
sparse_csc ?
sparse_csc_tensor()
sparse_csr ?
_sparse_csr_prod()
_sparse_csr_sum()
sparse_csr_tensor()
_sparse_log_softmax_backward_data()
_sparse_softmax_backward_data()
_sparse_sparse_matmul()
_sparse_sum()
special A module which is modeled after SciPy's special module.
split()
split_copy()
split_with_sizes()
split_with_sizes_copy()
spmm()
sqrt()
sqrt_()
square()
square_()
squeeze()
squeeze_copy()
sspaddmm()
_stack()
stack() Compare with cat()
_standard_gamma()
_standard_gamma_grad()
StaticModule ?
std()
std_mean()
stft()
Storage ?
storage Module
StorageBase ?
_storage_classes set object
Stream torch.Stream class
StreamObjType ?
strided ?
StringType ?
sub()
_subclasses Module
subtract()
SUM ?
sum()
svd()
svd_lowrank()
swapaxes()
swapdims()
SymBool torch.SymBool class
symeig()
SymFloat torch.SymFloat class
sym_float()
SymInt torch.SymInt class
sym_int()
SymIntType ?
sym_max()
sym_min()
sym_not()
_sync()
sys Module
t()
Tag ?
take()
take_along_dim()
tan()
tan_()
tanh()
tanh_()
t_copy()
Tensor ?
_tensor Module
tensor()
_tensor_classes set object
tensordot()
tensor_split()
_tensor_str Module
TensorType ?
_test_autograd_multiple_dispatch()
_test_autograd_multiple_dispatch_view()
_test_autograd_multiple_dispatch_view_copy()
_test_check_tensor()
testing Module
_test_serialization_subcmul()
textwrap Module
threshold()
threshold_()
ThroughputBenchmark ?
tile()
_to_cpu()
to_dlpack()
_to_functional_tensor()
topk()
torch Module
_TorchCompileInductorWrapper torch._TorchCompileInductorWrapper class
torch_version Module
trace()
TracingState ?
_transform_bias_rescale_qkv()
_transformer_decoder_only_layer_fwd()
_transformer_encoder_layer_fwd()
transpose()
transpose_copy()
trapezoid()
trapz()
triangular_solve()
tril()
tril_indices()
_trilinear()
triplet_margin_loss()
_triton_multi_head_attention()
_triton_scaled_dot_attention()
triu()
triu_indices()
true_divide()
trunc()
trunc_()
TupleType ?
Type ?
TYPE_CHECKING bool object
TypedStorage torch.storage.TypedStorage class
typename()
types Module
uint8 ?
unbind()
unbind_copy()
unflatten()
unfold_copy()
unify_type_list()
Union ?
UnionType ?
_unique()
unique()
_unique2()
unique_consecutive()
_unpack_dual()
unsafe_chunk()
unsafe_split()
unsafe_split_with_sizes()
unsqueeze()
unsqueeze_copy()
UntypedStorage ?
Use ?
_use_cudnn_ctc_loss()
_use_cudnn_rnn_flatten_weight()
use_deterministic_algorithms()
USE_GLOBAL_DEPS bool object
USE_RTLD_GLOBAL_WITH_LIBTORCH bool object
_utils Module
utils Module
_utils_internal Module
_validate_compressed_sparse_indices()
_validate_sparse_bsc_tensor_args()
_validate_sparse_bsr_tensor_args()
_validate_sparse_compressed_tensor_args()
_validate_sparse_coo_tensor_args()
_validate_sparse_csc_tensor_args()
_validate_sparse_csr_tensor_args()
Value ?
_values_copy()
values_copy()
vander()
var()
var_mean()
vdot()
version Module
_VF Module
view_as_complex()
view_as_complex_copy()
view_as_real()
view_as_real_copy()
view_copy()
vitals_enabled()
vmap()
_vmap_internals Module
vsplit()
vstack()
wait()
_warn_typed_storage_removal()
_weight_norm()
_weight_norm_interface()
_weights_only_unpickler Module
where()
windows Module
xlogy()
xlogy_()
zero_()
zeros() Create a tensor with a given shape, filled with the value 0. Compare with ones()
zeros_like()
See also torch functions that create tensors filled with random numbers (examples below)
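
A quick sketch of the cat()/stack() difference (shapes only): cat() joins tensors along an existing dimension, stack() inserts a new one.
>>> import torch
>>> a = torch.tensor([[1, 2], [3, 4]])
>>> b = torch.tensor([[5, 6], [7, 8]])
>>> torch.cat((a, b), dim=0).shape      # joined along dim 0
torch.Size([4, 2])
>>> torch.stack((a, b), dim=0).shape    # new leading dimension
torch.Size([2, 2, 2])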
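
A quick sketch of mm() vs matmul() vs bmm(), continuing the session above (shapes only):
>>> A = torch.randn(3, 4)
>>> B = torch.randn(4, 5)
>>> torch.mm(A, B).shape                            # strictly 2-D x 2-D, no broadcasting
torch.Size([3, 5])
>>> torch.matmul(A, torch.randn(4)).shape           # here: matrix-vector product
torch.Size([3])
>>> torch.bmm(torch.randn(10, 3, 4), torch.randn(10, 4, 5)).shape   # batched matrix product
torch.Size([10, 3, 5])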
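
Round-tripping a tensor with save() and load(), continuing the session above (a minimal sketch; the file name t.pt is arbitrary):
>>> t = torch.arange(6).reshape(2, 3)
>>> torch.save(t, "t.pt")
>>> torch.load("t.pt")
tensor([[0, 1, 2],
        [3, 4, 5]])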
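
A few tensor-creation calls in one place (a sketch; the random values depend on the seed and the PyTorch version):
>>> torch.zeros(2, 3)
tensor([[0., 0., 0.],
        [0., 0., 0.]])
>>> _ = torch.manual_seed(0)        # seed the default RNG for reproducibility
>>> o = torch.ones(2, 3)            # 2x3, filled with 1.
>>> p = torch.randperm(5)           # a random permutation of the integers 0 … 4
>>> r = torch.randn(2, 3)           # 2x3, standard-normal random values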

Data types (dtype)

Although Python does not distinguish between floats and doubles (Python's float is double precision), torch does make this distinction:
>>> i = torch.tensor([ 1  , 2  , 3   ]                    ) ; i.dtype
torch.int64

>>> f = torch.tensor([ 1.1, 2.2, 3.3 ]                    ) ; f.dtype
torch.float32

>>> d = torch.tensor([ 1.1, 2.2, 3.3 ], dtype=torch.double) ; d.dtype
torch.float64
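
A tensor's dtype can also be changed after creation (continuing the session above):
>>> f.double().dtype            # same as f.to(torch.float64)
torch.float64
>>> i.float().dtype             # int64 converted to the default float type
torch.float32
>>> torch.get_default_dtype()
torch.float32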

item() / tolist()

tensor.item() obtains the numerical value as a standard Python number if the tensor has one element only:
>>> import torch
>>> t = torch.tensor([ 4.2 ])
>>> t.item()
4.199999809265137
If the tensor has more than one element, item() throws a ValueError exception:
>>> tt = torch.tensor([ 4.2, 5.3, 6.1 ])
>>> tt.item()
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
ValueError: only one element tensors can be converted to Python scalars
tensor.tolist() returns the elements of the tensor as a list:
>>> tt.tolist()
[4.199999809265137, 5.300000190734863, 6.099999904632568]

pow(2).sum().item()

The sequence pow(2).sum().item() might be used to calculate a loss:
>>> import torch

>>> y        = torch.tensor([ 2.0, 5.0,  8.0 ])
>>> expected = torch.tensor([ 3.0, 3.0, 11.0 ])

>>> (y-expected)
tensor([-1.,  2., -3.])

>>> (y-expected).pow(2)
tensor([1., 4., 9.])

>>> (y-expected).pow(2).sum()
tensor(14.)

>>> loss = (y-expected).pow(2).sum().item()
>>> loss
14.0
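
For comparison, torch.nn.functional.mse_loss with reduction='sum' computes the same value (continuing the session above):
>>> import torch.nn.functional as F
>>> F.mse_loss(y, expected, reduction="sum").item()
14.0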

data.item(), backward(), grad

Import torch and create a few simple tensors:
import torch

x = torch.Tensor([3.0]).double() ; x.requires_grad = True
a = torch.Tensor([4.0]).double() ; a.requires_grad = True
b = torch.Tensor([2.0]).double() ; b.requires_grad = True
c = torch.Tensor([5.0]).double() ; c.requires_grad = True
Let y be dependent on a, b, c and x:
y = a * x**2  +  b * x  +  c
The following prints 47.0 (= 4·3² + 2·3 + 5)
print(y.item())
With y.backward(), it's possible to calculate the gradients of the variables that have an influence on the value of y.
y.backward()
The following prints 26.0 (= 2·a·x + b = 2·4·3 + 2)
print(x.grad.item())
The following prints 9.0 (= x² = 3²):
print(a.grad.item())
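
The remaining gradients follow the same pattern: the following prints 3.0 (= dy/db = x) and 1.0 (= dy/dc).
print(b.grad.item())
print(c.grad.item())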

device

dev   = torch.device("cpu")
# dev = torch.device("cuda:0") # Uncomment  to run on GPU
…
rnd = torch.randn((), device=dev, dtype=torch.float)
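
A common device-agnostic variant (a sketch): pick CUDA when it is available, otherwise fall back to the CPU.
import torch

dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")

x = torch.randn(3, 4, device=dev)   # created directly on the chosen device
y = (x @ x.T).to("cpu")             # compute on dev, copy the result back to the CPU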

Misc

The PyTorch Foundation is a project of the Linux Foundation.

See also

The Tensor class.

Index