我试图用不同次数的多项式函数来拟合一些我自己生成的数据,并且在不使用任何库的情况下手写梯度下降。我还使用了一个自定义的损失函数,并手工推导了梯度(希望推导是正确的)。问题是:在计算残差平方的均值时,我得到了无穷大和 NaN,我不知道自己哪里做错了。请帮帮我。
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
#y(x) = sin(x) + noise
def F(x_in_rad):
    """Return sin(x) plus Gaussian noise with amplitude `noise` (module global).

    Generalized: the noise vector is sized from the input's own shape
    rather than the global sample count N, so the function now works for
    inputs of any length (the original broke whenever len(x) != N).
    """
    x = np.asarray(x_in_rad)
    return np.sin(x) + noise * np.random.normal(0, 1, x.shape)
noise = 0.5  # noise amplitude mixed into the sine signal
N = 50  # number of generated samples
#X - N datapoints in radians
# NOTE(review): normal(0,1)*359 degrees clusters samples near 0 rather than
# sweeping the circle uniformly — presumably uniform(0, 359) was intended;
# confirm with the author.
X = np.deg2rad(np.random.normal(0,1,N) * 359)
Y = F(X)
# Reshape both to (N, 1) column vectors so the matrix algebra below works.
X = np.atleast_2d(X).T
Y = np.atleast_2d(Y).T
#split data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=42)
#mean of the squared residuals
def sq_res_mean(Y_pred, Y_real):
    """Mean of the squared residuals between predictions and targets."""
    residuals = Y_pred - Y_real
    return np.mean(residuals ** 2)
#create design matrix
def designmatpoly(X, degree):
    """Build the polynomial design matrix [1, x, x^2, ..., x^degree].

    X is an (n, 1) column vector; the result has shape (n, degree + 1).
    The x**0 column supplies the intercept (all ones).
    """
    x = X[:, 0]
    columns = [x ** power for power in range(degree + 1)]
    return np.stack(columns).T
#L2 norm squared; gradient = 2w
def C(w):
    """Squared L2 norm of the weight vector (its gradient is 2*w)."""
    return (w * w).sum()
def gradientdescent(Amat, y, rate, numiter, lam, deg):
n, p = Amat.shape
whistory = []
w_analytical = np.dot((np.dot(Amat.T,Amat) + lam*np.eye(deg+1, dtype=int))**(-1),np.dot(Amat.T,Y_train))
losshistory = []
#random weights initialized
w = np.atleast_2d(np.random.randn(deg+1)).T
for i in range(numiter):
loss = np.square(y - w[0] - np.dot(Amat, w)) + lam*C(w_analytical)
whistory.append(w)
losshistory.append(loss)
grad = np.dot(-2*Amat.T, y - w[0] - Amat.dot(w)) + lam*2*w_analytical
w = w - rate*grad
return w, np.asarray(whistory), np.asarray(losshistory)
def model(degree, rate=0.0001, num_iters=50, lam=0.5):
    """Fit a polynomial of the given degree on the training split.

    Returns the fitted weight vector and the test-set design matrix
    (so the caller can compute test predictions).
    """
    test_design = designmatpoly(X_test, degree)
    train_design = designmatpoly(X_train, degree)
    fitted_w, _whist, _losses = gradientdescent(
        train_design, Y_train, rate, num_iters, lam, degree)
    return fitted_w, test_design
# Evaluate polynomial fits of degree 1..9 on the held-out test split.
degrees = []
sq_res_means = []
for deg in range(1, 10):
    fitted_w, test_design = model(degree=deg)
    degrees.append(deg)
    prediction = np.dot(test_design, fitted_w)
    mean_sq = sq_res_mean(prediction, Y_test)
    sq_res_means.append(mean_sq)
    print("deg", deg, "sq_res_mean", mean_sq)
我不太确定,因为变量名都太简略了,但数值上的问题是:在任何高于一次(线性)的次数下,你的 w 向量都会指数级失控,直到溢出——这就是你得到 NaN 的原因。从原理上讲,计算出来的梯度与 w 向量的大小不成比例:即使学习率很低,rate*grad 仍然比 w 本身大,足以在很少的迭代内把 w 推向发散。我建议你把矩阵初始化为一个有已知解的简单系统,观察 deg=2 时的前两三次迭代,看看计算结果是如何偏离你的预期的。
相关问题 更多 >
编程相关推荐