# Create a matrix
A <- 1:12          # A is a 1-dimensional array of the integers 1-12
dim(A) <- c(4, 3)  # Change the dimension of A to be 4 by 3
print("A")
A  # the matrix A, which is 4 by 3

# Another way to do the same thing
A <- matrix(
  c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),  # data
  nrow = 4,         # number of rows
  ncol = 3,         # number of columns
  byrow = FALSE)    # read the data by column
A

# Another way
A <- matrix(
  seq(1, 12),       # data
  nrow = 4,         # number of rows
  ncol = 3,         # number of columns
  byrow = FALSE)    # read the data by column
A

# Transpose A
print("Transpose of A")
t(A)       # the transpose of A, which is 3 by 4
A <- t(A)  # overwrite A with its transpose: A is now a 3 by 4 array
A

# Another way to make this matrix
A <- matrix(
  seq(1, 12),       # data
  nrow = 3,         # number of rows
  ncol = 4,         # number of columns
  byrow = TRUE)     # read the data by row
A

# Scalar multiplication
alpha <- 3
A
alpha * A

# R understands matrix negation
-A
-1 * A

# in R, you can create an m by n zero matrix as follows:
Z <- matrix(0, 3, 4)
Z
A + Z

# To create an n by n identity matrix in R, make a diagonal matrix of ones:
I <- diag(1, 3)  # a diagonal matrix of 1s, with dimension 3 by 3
I
I2 <- diag(c(1, 1, 1))  # a diagonal matrix with the vector (1,1,1) along the diagonal
I2

# R supports scalar addition; this is not a formal operation in linear algebra
alpha + A

# Matrix addition
B <- 24:13         # B is an array with the integers 24 to 13 in descending order
dim(B) <- c(4, 3)  # B is now a 4 by 3 array
B <- t(B)          # B is now a 3 by 4 array
A      # 1:12
B      # 24:13
A + B  # a 3 by 4 array; all entries are 25

# Matrix multiplication
C <- 1:4
C
A %*% C  # a 3 by 1 matrix: A is 3 by 4 and C is a length-4 vector
cat("the (3,1) entry should be", 9*1 + 10*2 + 11*3 + 12*4)  # Check the (3,1) entry

# now multiplying a matrix by a matrix
# The matrix multiplication operator is %*%
A %*% t(B)  # a 3 by 3 matrix
t(A) %*% B  # a 4 by 4 matrix

# Multiplication by the identity matrix
I3 <- diag(1, 3)  # I3 is the 3 by 3 identity matrix
I3 %*% A  # multiplying from the left by the 3 by 3 identity matrix gives us back A
I4 <- diag(1, 4)  # I4 is the 4 by 4 identity matrix
A %*% I4  # multiplying from the right by the 4 by 4 identity matrix gives us back A

# R also does elementwise multiplication; again, this is not a formal operation in linear algebra
A * C  # R copies C repeatedly to fill out the dimension of A,
       # then multiplies elements of A by the corresponding element

# The dot product
a <- 1:5
b <- c(rep(1, 3), rep(2, 2))  # new function: define a vector by repeating values
b
t(a) %*% b
# another way to do it
sum(a * b)

# the 2-norm
norm(as.matrix(a), type = "2")  # the "norm" function operates on matrices--not lists.
# The function "as.matrix()" turns the list a into a matrix.
# alternative 1
sqrt(sum(a * a))
# alternative 2
sqrt(t(a) %*% a)  # check: sqrt(1^2+2^2+3^2+4^2+5^2)

# the 1-norm
norm(as.matrix(a), type = "o")
# alternative
sum(abs(a))  # check: 1+2+3+4+5

# Here's how to plot a histogram in R. You will need this for the exercise below.
x <- runif(1000, min = -1, max = 2)  # generate pseudorandom numbers uniformly distributed between -1 and 2.
hist(x)  # plot a histogram of the results

# Trace in R
A <- array(1:5, dim = c(5, 5))
A
sum(diag(A))  # diag() extracts the diagonal of a matrix; sum() adds the elements of a list

# Determinant
A <- array(1:5, dim = c(5, 5))
A  # the rows and columns are multiples of each other, so A is singular (not invertible)
det(A)  # 0

# The set of singular matrices has Lebesgue measure 0, so a random matrix is nonsingular with probability 1
A <- array(runif(25), dim = c(5, 5))  # 5 by 5 matrix with uniformly distributed elements
A
det(A)

# Inverse
solve(A)  # this is almost never the right thing to do

# Eigen decompositions in R
A <- array(c(1, 1, 1, 1, 2, 3, 0, 1, 1), dim = c(3, 3))
eigen(A)
# eigen() is an example of a function that returns more than one variable. Here's how you access them:
decomp <- eigen(A)
decomp$values
decomp$vectors
# let's check whether these really are eigenvectors!
ev1 <- decomp$vectors[, 1]  # the first column of the eigenvector matrix: an eigenvector
ev1
A %*% ev1 / decomp$values[1]  # this should give us back the first eigenvector

# check that the eigen-decomposition of A actually holds
A
decomp$vectors %*% diag(decomp$values) %*% solve(decomp$vectors)

# check that the sum of the eigenvalues is the trace
sum(diag(A))
sum(decomp$values)

# check that the product of the eigenvalues is the determinant
det(A)
prod(decomp$values)

# check whether we can invert A using its eigen-decomposition
Ainv <- decomp$vectors %*% diag(1 / decomp$values) %*% solve(decomp$vectors)
Ainv
A %*% Ainv  # this should be the identity matrix

# this matrix is not positive-definite, so the eigenvector matrix is not orthogonal: its
# transpose is not its inverse. Let's verify that:
t(decomp$vectors)
solve(decomp$vectors)

# Now let's try it with a positive-definite matrix. We can make a positive definite matrix from a nonsingular matrix
Apd <- t(A) %*% A
Apd
decomp <- eigen(Apd)
decomp$values  # the eigenvalues should be non-negative
decomp$vectors %*% t(decomp$vectors)  # and the matrix of eigenvectors should be orthogonal

# the eigenvector matrix should be orthogonal. Let's check:
t(decomp$vectors)
solve(decomp$vectors)  # should be equal to the transpose

# let's make the matrix square root of Apd
ApdHalf <- decomp$vectors %*% diag(sqrt(decomp$values)) %*% t(decomp$vectors)
ApdHalf
ApdHalf %*% ApdHalf  # should reproduce Apd

# Let's try this in R
A <- array(c(2, 6, 3, 4, 1, 4, 4, 5, 3), dim = c(3, 3))
A
b <- c(2, 1, 1)
b
solve(A, b)  # solve a linear system using R

A <- array(runif(25), dim = c(5, 5))  # 5 by 5 matrix with uniformly distributed elements
b <- rep(1, 5)
x <- solve(A, b)
x
A %*% x  # check that x solves the linear system
Ainv <- solve(A)
Ainv
x <- Ainv %*% b
x
A %*% x