# Create random tensors with values drawn uniformly from [0, 1).
random_tensor_1 = torch.rand(2, 3, 4)
random_tensor_2 = torch.rand(2, 3, 4, dtype=torch.float16)
print(random_tensor_1)
print(random_tensor_2)
# Use rand_like to keep the same shape and dtype
random_tensor_3 = torch.rand_like(random_tensor_2)
print(random_tensor_3)
# Random tensors of integers drawn uniformly from a range
random_tensor_4 = torch.randint(1, 100, (3, 4, 4))
random_tensor_5 = torch.randint_like(random_tensor_4, low=1, high=10)
print(random_tensor_4)
print(random_tensor_5)
# Random tensors drawn from the standard normal distribution
random_tensor_6 = torch.randn(2, 3)
print(random_tensor_6)
random_tensor_7 = torch.randn(10000000)
print(random_tensor_7)
print(f"Average: {random_tensor_7.mean():.4f}")
print(f"Standard deviation: {random_tensor_7.std():.4f}")
# Customize the mean and std, or use randn_like
mean, std = 5.0, 2.0
random_tensor_8 = mean + std * random_tensor_6
random_tensor_9 = torch.randn_like(random_tensor_3)
print(random_tensor_9)
# torch.normal draws each element independently from its own normal distribution
means = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
stds = torch.tensor([[1.0, 2.0], [4.0, 5.0]])
custom_normal = torch.normal(means, stds)
print(custom_normal)
# Shuffle data with a random permutation of indices
perm = torch.randperm(5)
print(perm)
data = torch.tensor([10, 20, 30, 40, 50])
shuffled = data[torch.randperm(len(data))]
print(shuffled)
You can also sample from other distributions, for example (a quick sketch of each follows the list):
torch.bernoulli()
torch.poisson()
torch.multinomial()
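A minimal sketch of these three samplers; the input tensors here are just illustrative:

probs = torch.tensor([0.2, 0.5, 0.8])
print(torch.bernoulli(probs))                     # 0/1 draws, one per probability
rates = torch.tensor([1.0, 4.0, 10.0])
print(torch.poisson(rates))                       # counts drawn from Poisson(rate)
weights = torch.tensor([0.1, 0.3, 0.6])
print(torch.multinomial(weights, num_samples=2))  # category indices sampled by weight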
from torch.distributions import Normal, Uniform, Gamma
# Normal distribution
normal_dist = Normal(loc=0.0, scale=1.0)
samples = normal_dist.sample((2, 3))
print(samples)
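The other imported distributions follow the same sample() pattern; a minimal sketch with arbitrary parameters:

uniform_dist = Uniform(low=0.0, high=10.0)
print(uniform_dist.sample((2, 3)))
gamma_dist = Gamma(concentration=2.0, rate=1.0)
print(gamma_dist.sample((2, 3)))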
# First create an uninitialized tensor, then fill it in place with random values
uninitialized = torch.empty(2, 3)
# From the continuous uniform distribution
uninitialized.uniform_()
# From the normal distribution
uninitialized.normal_()
# Or call the in-place random methods directly
random_tensor = torch.empty(2, 3).uniform_(-1, 1)
random_tensor = torch.empty(2, 3).normal_(0, 1)
random_tensor = torch.empty(2, 3).exponential_(1)
Check tensor information
You can use the following attributes and methods to inspect a tensor.
x = torch.tensor(2.0, requires_grad=True)
y = x**2 + 3 * x + 1
y.backward()
print(y.shape)  # torch.Size([]) - a scalar tensor
print(x.grad)   # dy/dx = 2x + 3 = 7 at x = 2
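Beyond shape and grad, a tensor exposes several other attributes; a minimal sketch (the commented values assume a fresh CPU float tensor):

t = torch.rand(2, 3)
print(t.shape)          # torch.Size([2, 3])
print(t.dtype)          # torch.float32
print(t.device)         # cpu (or cuda:0 on a GPU)
print(t.ndim)           # 2
print(t.numel())        # 6
print(t.requires_grad)  # False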
Broadcasting mechanism
Broadcasting works the same way as in NumPy.
Broadcasting follows these rules:
Compare dimensions starting from the trailing (rightmost) dimension and move leftward.
Two dimensions are compatible if any of the following holds:
The two dimensions are equal.
One of the dimensions is 1.
One of the dimensions does not exist (i.e., the tensors have different numbers of dimensions).
If none of these conditions is met, an error is raised.
A = torch.randn(1, 3)
B = torch.randn(3, 1)
C = torch.randn(1, 2)
print(A + B)  # (1, 3) + (3, 1) broadcasts to (3, 3)
print(B + C)  # (3, 1) + (1, 2) broadcasts to (3, 2)
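When a pair of dimensions is neither equal nor 1, broadcasting fails; a minimal sketch of the error case:

try:
    torch.randn(2, 3) + torch.randn(3, 2)  # last dims 3 vs 2: neither equal nor 1
except RuntimeError as e:
    print(e)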
Changing a tensor's shape
Matrix multiplication is one of the most common operations in PyTorch, so we often need to reshape tensors to make their dimensions compatible.
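For instance, a 1-D vector can be lifted to a column matrix before a matrix multiply; a minimal sketch (unsqueeze is explained just below):

W = torch.rand(4, 3)
v = torch.rand(3)       # shape (3,)
col = v.unsqueeze(1)    # shape (3, 1)
print((W @ col).shape)  # torch.Size([4, 1])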
unsqueeze(dim): Adds a new dimension of size 1 at the specified position.
x = torch.tensor([1, 2, 3])
y = x.unsqueeze(0)  # shape: (1, 3)
z = x.unsqueeze(1)  # shape: (3, 1)
print(y.shape)
print(z.shape)
squeeze(dim): Removes dimensions of size 1. If no dim is given, all singleton dimensions are removed.
a = torch.rand(1, 3, 1, 4)
b = a.squeeze(0)  # shape: (3, 1, 4)
d = a.squeeze(1)  # no effect: dim 1 has size 3, so the shape stays (1, 3, 1, 4)
c = a.squeeze()   # shape: (3, 4) (all size-1 dims removed)
repeat: Repeats the entire tensor along specified dimensions.
In PyTorch, repeat() replicates the whole tensor along the specified dimensions, expanding its shape. It differs from repeat_interleave(): repeat() tiles the tensor as a whole, while repeat_interleave() repeats individual elements.
repeat(*sizes) takes one repeat count per dimension and returns a new tensor.
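A short sketch contrasting the two (output values shown in comments):

x = torch.tensor([1, 2, 3])
print(x.repeat(2))             # tensor([1, 2, 3, 1, 2, 3]) - whole tensor tiled
print(x.repeat_interleave(2))  # tensor([1, 1, 2, 2, 3, 3]) - each element repeated
y = torch.tensor([[1, 2], [3, 4]])
print(y.repeat(2, 3).shape)    # torch.Size([4, 6]): rows x2, cols x3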