A user is implementing univariate linear regression in Python, but the code outputs NaN for theta after the parameter values diverge to infinity. How can that be fixed?

250    Asked by ranjan_6399 in Data Science , Asked on Jan 15, 2020

# implementation of univariate linear regression

import numpy as np

def cost_function(hypothesis, y, m):
    """Return the mean-squared-error cost J = (1 / 2m) * sum((h - y)^2)."""
    squared_errors = np.square(hypothesis - y)
    return squared_errors.sum() / (2 * m)

def hypothesis(X, theta):
    """Return the linear-model predictions h(X) = X @ theta."""
    return X @ theta

def gradient_descent(X, y, theta, m, alpha):
    # Batch gradient descent for univariate linear regression, fixed 1500 steps.
    # X is the (m, 2) design matrix (leading column of ones); theta is (2, 1).
    #
    # NOTE(review): this is the buggy version the question asks about.
    # hypothesis(X, theta) has shape (m, 1), but y (= data[:, 1] in the caller)
    # and X[:, 1] have shape (m,). Subtracting/multiplying an (m,) array with an
    # (m, 1) array broadcasts to an (m, m) matrix, so the summed "gradient" is
    # enormously inflated and theta diverges to +/-inf and then NaN.
    for i in range(1500):
        # Simultaneous update: both new values are computed before writing back.
        temp1 = theta[0][0] - alpha * (1 / m) * (hypothesis(X, theta) - y).sum()
        temp2 = theta[1][0] - alpha * (1 / m) * ((hypothesis(X, theta) - y) * X[:, 1]).sum()
        theta[0][0] = temp1
        theta[1][0] = temp2
    return theta

if __name__ == '__main__':
    # Demo driver for the (buggy) implementation above.
    # NOTE(review): `data` is never defined in this snippet — presumably it was
    # loaded earlier (e.g. with np.loadtxt); as posted this raises NameError.
    y = data[:, 1]  # shape (m,) — this is what triggers the broadcasting bug
    m = y.size
    # Design matrix: a column of ones (intercept term) plus the feature column.
    X = np.ones(shape=(m, 2))
    X[:, 1] = data[:, 0]
    theta = np.zeros(shape=(2, 1))  # (2, 1) parameters, initialised to zero
    alpha = 0.01  # learning rate

The problem is that when we take X[:, 1] or data[:, 1], we get arrays of shape (m,). When we subtract or multiply an object of shape (m,) with a matrix of shape (m, 1), NumPy broadcasting produces a matrix of shape (m, m), which makes the summed gradient wrong and causes theta to diverge. The code below fixes the problem by reshaping these arrays to (m, 1).

# implementation of univariate linear regression

import numpy as np

def cost_function(hypothesis, y, m):
    """Compute the least-squares cost (1 / 2m) * sum of squared residuals."""
    residuals = hypothesis - y
    return (1 / (2 * m)) * (residuals * residuals).sum()

def hypothesis(X, theta):
    """Predict targets for design matrix X under parameters theta."""
    return np.dot(X, theta)

def gradient_descent(X, y, theta, m, alpha, num_iters=1500):
    """Run batch gradient descent for univariate linear regression.

    Parameters
    ----------
    X : ndarray, shape (m, 2)
        Design matrix whose first column is all ones (intercept term).
    y : ndarray, shape (m, 1)
        Column vector of targets (already reshaped by the caller).
    theta : ndarray, shape (2, 1)
        Parameter vector; updated in place.
    m : int
        Number of training examples.
    alpha : float
        Learning rate.
    num_iters : int, optional
        Number of descent steps (default 1500, matching the original code).

    Returns the (mutated) theta array.
    """
    # Reshape the feature column to (m, 1) so elementwise products with the
    # (m, 1) error vector stay (m, 1) instead of broadcasting to (m, m) —
    # that broadcasting is what made the original question's code diverge.
    X_1 = X[:, 1].reshape((m, 1))
    for _ in range(num_iters):
        # Compute the prediction error once per step (the original evaluated
        # hypothesis(X, theta) twice per iteration).
        error = hypothesis(X, theta) - y
        # Simultaneous update: both new values come from the same old theta.
        temp1 = theta[0][0] - alpha * (1 / m) * error.sum()
        temp2 = theta[1][0] - alpha * (1 / m) * (error * X_1).sum()
        theta[0][0] = temp1
        theta[1][0] = temp2
    return theta

if __name__ == '__main__':
    # Synthetic dataset: y = 30 * x + noise, where the noise is the second
    # standard-normal column of `data`.
    data = np.random.normal(size=(100, 2))
    y = 30 * data[:, 0] + data[:, 1]
    m = y.size
    # Design matrix: a column of ones (intercept) plus the feature column.
    X = np.ones(shape=(m, 2))
    X[:, 1] = data[:, 0]
    # Reshape y to (m, 1) so (hypothesis - y) stays a column vector and never
    # broadcasts to an (m, m) matrix — this is the fix for the NaN divergence.
    y = y.reshape((m, 1))
    theta = np.zeros(shape=(2, 1))
    alpha = 0.01  # learning rate
    # FIX(review): the original snippet set everything up but never ran the
    # descent, so the repaired code was never exercised. Fit and report the
    # parameters (theta[1][0] should come out close to 30).
    theta = gradient_descent(X, y, theta, m, alpha)
    print('theta =', theta.ravel())
    print('final cost =', cost_function(hypothesis(X, theta), y, m))