#!/usr/bin/env python
# coding: utf-8

# In[1]:

get_ipython().run_line_magic('matplotlib', 'inline')


# # What is PyTorch?
# ================
#
# It’s a Python-based scientific computing package targeted at two audiences:
#
# - A NumPy replacement that can leverage the power of GPUs
# - A platform for deep learning research
#
#
# Tensors
# ---------------
#
# Tensors are similar to NumPy’s ndarrays, with the added benefit that they
# can be placed on a GPU to accelerate computation.

# In[2]:

import torch


# Construct a 5x3 matrix filled with zeros:

# In[3]:

x = torch.zeros(5, 3)
print(x)


# Construct a randomly initialized matrix:

# In[5]:

x = torch.rand(5, 3)
print(x)


# Construct a tensor directly from data:

# In[8]:

x = torch.tensor([5.5, 3])
print(x)


# Or create a tensor based on an existing tensor. These methods
# will reuse properties of the input tensor (such as dtype) unless
# new values are provided:

# In[9]:

# new_* methods create a tensor of the same type:
x = x.new_ones(5, 3)
print(x)

# create a tensor like x and override the dtype:
x = torch.randn_like(x, dtype=torch.double)
print(x)  # result has the same size


# Get its size:

# In[12]:

print(x.size())


# Note that ``torch.Size`` is in fact a tuple, so it supports all tuple operations.
#
# ### Operators
#
# **Addition:**

# In[8]:

y = torch.rand(5, 3, dtype=torch.double)  # match x's dtype (double, set above) so the additions below type-check
print(x + y)


# You can also use the ``torch.add`` function:

# In[9]:

print(torch.add(x, y))


# You also have the option of providing an output tensor as an argument:

# In[10]:

result = torch.empty(5, 3, dtype=torch.double)  # the out tensor must have the result's dtype
torch.add(x, y, out=result)
print(result)


# Addition: in-place

# In[11]:

# adds x to y
y.add_(x)
print(y)
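
# As a quick sanity check, all four addition forms above compute the same
# elementwise sum (a minimal sketch with made-up tensors, not part of the
# original notebook):

a = torch.rand(5, 3)
b = torch.rand(5, 3)
out = torch.empty(5, 3)
torch.add(a, b, out=out)   # write the sum into a preallocated tensor
c = a.clone()
c.add_(b)                  # in-place: mutates c
assert torch.equal(a + b, torch.add(a, b))
assert torch.equal(a + b, out)
assert torch.equal(a + b, c)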

# Note
#
# Any operation that mutates a tensor in-place has a trailing ``_`` in the name.
# For example: ``x.copy_(y)`` and ``x.t_()`` will change ``x``.
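
# To see the convention in action, compare the out-of-place and in-place
# variants (a small illustrative sketch, not part of the original notebook):

t = torch.zeros(2, 3)
print(t.t().size())  # torch.Size([3, 2]): t() returns a transposed view; t is unchanged
print(t.size())      # still torch.Size([2, 3])

t.t_()               # t_() transposes t in place
print(t.size())      # now torch.Size([3, 2])

t.copy_(torch.ones(3, 2))  # copy_() overwrites the contents of t in place
print(t)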

# You can use standard NumPy-like indexing:

# In[12]:

print(x[:, 1])


# Resizing: if you want to resize/reshape a tensor, you can use ``torch.view``:

# In[13]:

x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # the size -1 is inferred from the other dimensions
print(x.size(), y.size(), z.size())


# ### Torch documentation
#
# You can read all about PyTorch tensors, including transposing, indexing, slicing,
# mathematical operations, linear algebra, random numbers, etc.,
# in the [PyTorch documentation](https://pytorch.org/docs/stable/torch.html).
#
#
# ### NumPy Bridge
#
# Converting a Torch Tensor to a NumPy array and vice versa is easy.
#
# The Torch Tensor and NumPy array will share their underlying memory
# locations, and changing one will change the other.

# In[14]:

a = torch.ones(5)
print(a)


# In[15]:

b = a.numpy()
print(b)


# See how the NumPy array changes in value:

# In[16]:

a.add_(1)
print(a)
print(b)


# All tensors on the CPU except CharTensor support converting to NumPy and back.
#
# ### CUDA Tensors
#
# Tensors can be moved onto any device using the ``.to`` method.

# In[17]:

# let us run this cell only if CUDA is available
# We will use ``torch.device`` objects to move tensors in and out of the GPU
if torch.cuda.is_available():
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # directly create a tensor on the GPU
    x = x.to(device)                       # or just use strings: ``.to("cuda")``
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # ``.to`` can also change the dtype at the same time!


# ### Autograd
#
# Setting ``requires_grad=True`` tells autograd to track operations on a tensor
# so that gradients can be computed automatically with ``backward()``.

# In[17]:

x = torch.tensor(1., requires_grad=True)
w = torch.tensor(2., requires_grad=True)
b = torch.tensor(3., requires_grad=True)


# In[18]:

# Build a computational graph.
y = w * x + b


# In[19]:

# Compute the gradients of y with respect to all tensors that require them.
y.backward()


# In[21]:

print(x.grad)  # x.grad = dy/dx = w = 2
print(w.grad)  # w.grad = dy/dw = x = 1
print(b.grad)  # b.grad = dy/db = 1
print(y.grad)  # None: y is not a leaf tensor, so its gradient is not retained
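
# One more autograd detail worth knowing: gradients accumulate into ``.grad``
# across calls to ``backward()``, so they are typically reset between steps.
# A minimal sketch (the tensors here are illustrative, not from the cells above):

x = torch.tensor(1., requires_grad=True)
for _ in range(2):
    y = 2 * x      # rebuild the graph on each iteration
    y.backward()   # each call adds dy/dx = 2 into x.grad
print(x.grad)      # tensor(4.): two accumulated backward calls

x.grad.zero_()     # reset the gradient before the next backward pass
(2 * x).backward()
print(x.grad)      # tensor(2.)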