
Python Machine Learning: Gradient Descent 004 - Implementing Gradient Descent for Linear Regression

Posted: 2019-05-03 21:36:55


Straight to the code.

import numpy as np
import matplotlib.pyplot as plt

Generate simulated data:

np.random.seed(666)
x = 2 * np.random.random(size=100)
y = x * 3 + 4 + np.random.normal(size=100)

To handle multiple samples (points) and multiple features, reshape x into a feature matrix with one column per feature:

X = x.reshape(-1,1)

plt.scatter(x, y)
plt.show()

The loss function:

# Loss function: mean squared error of the predictions X_b.dot(theta) against y
def J(theta, X_b, y):
    try:
        return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
    except:
        return float('inf')  # guard against numerical overflow
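As a quick sanity check (my own sketch, not from the original post), the loss evaluated at the true parameters (intercept 4, slope 3) should be close to the variance of the added noise, which is 1, while the loss at the zero vector should be far larger:

# X_b_check is a hypothetical helper; it mirrors the X_b built later in this post
X_b_check = np.hstack([np.ones((len(x), 1)), x.reshape(-1, 1)])
print(J(np.array([4., 3.]), X_b_check, y))  # roughly 1, the noise variance
print(J(np.zeros(2), X_b_check, y))         # much larger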

The derivative (the gradient, i.e. the vector of partial derivatives):

def dJ(theta, X_b, y):
    res = np.empty(len(theta))
    res[0] = np.sum(X_b.dot(theta) - y)  # partial derivative w.r.t. the intercept term
    for i in range(1, len(theta)):
        res[i] = (X_b.dot(theta) - y).dot(X_b[:, i])  # partial derivative w.r.t. theta_i
    return res * 2 / len(X_b)
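For reference, the same gradient can be computed in fully vectorized form without the Python loop. This is my addition, not part of the original post; it returns the same vector as dJ above:

def dJ_vectorized(theta, X_b, y):
    # X_b.T.dot(errors) stacks all the partial derivatives at once, intercept included
    return X_b.T.dot(X_b.dot(theta) - y) * 2 / len(X_b)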

theta_history = []  # records each theta along the descent path

def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
    theta = initial_theta
    theta_history.append(theta)
    i_iter = 0
    while i_iter < n_iters:
        gradient = dJ(theta, X_b, y)
        last_theta = theta
        theta = theta - eta * gradient
        theta_history.append(theta)
        # stop once the loss no longer changes by more than epsilon
        if np.abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
            break
        i_iter += 1
    return theta

X_b = np.hstack([np.ones((len(x), 1)), x.reshape(-1, 1)])
initial_theta = np.zeros(X_b.shape[1])
eta = 0.1
theta = gradient_descent(X_b, y, initial_theta, eta)
print(theta)

[4.02369667 3.00517447]
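The result can be cross-checked against the closed-form normal-equation solution, the same formula that fit_normal uses in the class below; this check is my addition, not part of the original post:

theta_exact = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
print(theta_exact)  # should closely match the gradient-descent result above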

Using our encapsulated LinearRegression class:

import numpy as np
from Simple_linear_Regression.metrics import r2_score  # project-local helper computing the R^2 score


class LinearRegression:

    def __init__(self):
        """Initialize the Linear Regression model"""
        self.coef_ = None
        self.interception_ = None
        self._theta = None

    def fit_normal(self, X_train, y_train):
        """Train the Linear Regression model on X_train, y_train via the normal equation"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        X_b = np.hstack([np.ones((X_train.shape[0], 1)), X_train])
        self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)
        self.interception_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def fit_gd(self, X_train, y_train, eta=0.01, n_iters=1e4):
        """Train the Linear Regression model on X_train, y_train using gradient descent"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"

        def J(theta, X_b, y):
            try:
                return np.sum((y - X_b.dot(theta)) ** 2) / len(X_b)
            except:
                return float('inf')

        def dJ(theta, X_b, y):
            res = np.empty(len(theta))
            res[0] = np.sum(X_b.dot(theta) - y)
            for i in range(1, len(theta)):
                res[i] = (X_b.dot(theta) - y).dot(X_b[:, i])
            return res * 2 / len(X_b)

        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            theta = initial_theta
            i_iter = 0
            while i_iter < n_iters:
                gradient = dJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                if np.abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon:
                    break
                i_iter += 1
            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y, initial_theta, eta, n_iters=n_iters)
        self.interception_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def predict(self, X_predict):
        """Given a data set X_predict, return a vector of predictions for X_predict"""
        assert self.interception_ is not None and self.coef_ is not None, \
            "must fit before predict"
        assert X_predict.shape[1] == len(self.coef_), \
            "the feature number of X_predict must be equal to X_train"
        X_b = np.hstack([np.ones((X_predict.shape[0], 1)), X_predict])
        return X_b.dot(self._theta)

    def score(self, X_test, y_test):
        """Measure the accuracy of the current model on the test set X_test, y_test"""
        y_predict = self.predict(X_test)
        return r2_score(y_test, y_predict)

    def __repr__(self):
        return "LinearRegression()"


if __name__ == '__main__':
    np.random.seed(666)
    x = 2 * np.random.random(size=100)
    y = x * 3 + 4 + np.random.normal(size=100)
    X = x.reshape(-1, 1)
    lin_reg = LinearRegression()
    lin_reg.fit_gd(X, y)
    print(lin_reg.coef_)
    print(lin_reg.interception_)

[3.00517934]
4.023690891597359
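As a final check (my addition, assuming it runs in the same script as the class above), fit_normal on the same data should recover essentially the same parameters, since both methods minimize the same loss:

lin_reg2 = LinearRegression()
lin_reg2.fit_normal(X, y)
print(lin_reg2.coef_)          # close to [3.005...]
print(lin_reg2.interception_)  # close to 4.0236...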
