我在这个网站上按照以下步骤创建了一个神经网络:
https://machinelearningmastery.com/implement-backpropagation-algorithm-scratch-python/
我的神经网络是这样的:
from random import seed
from random import randrange
from random import random
from csv import reader
from math import exp
# Neuron activation
def activate(pesi, inputs):
    """Weighted sum of *inputs* under weights *pesi*; the last weight is the bias."""
    bias = pesi[-1]
    # zip pairs pesi[0..n-1] with the n inputs; the trailing bias weight is excluded.
    weighted = sum(w * x for w, x in zip(pesi, inputs))
    return bias + weighted
# Chosen transfer function
def transfer(activation):
    """Sigmoid: squash *activation* into the open interval (0, 1)."""
    negative_exp = exp(-activation)
    return 1.0 / (1.0 + negative_exp)
# Forward propagation
def forward_propagate(network, row):
    """Feed *row* through every layer and return the output-layer values.

    Side effect: each neuron dict gets its 'output' field set, which the
    backward pass and weight update rely on.
    """
    layer_inputs = row
    for layer in network:
        outputs = []
        for neuron in layer:
            neuron['output'] = transfer(activate(neuron['pesi'], layer_inputs))
            outputs.append(neuron['output'])
        # This layer's outputs become the next layer's inputs.
        layer_inputs = outputs
    return layer_inputs
# Derivative of the sigmoid, expressed in terms of its output
def transfer_derivative(output):
    """Return s'(x) given s(x)=output: s'(x) = s(x) * (1 - s(x))."""
    complement = 1.0 - output
    return output * complement
# Back-propagation of error
def backward_propagate_error(network, valore):
    """Walk the layers from output to input, storing a 'delta' on each neuron.

    *valore* is the expected (one-hot) output vector for the current row.
    """
    last = len(network) - 1
    for i in range(last, -1, -1):
        layer = network[i]
        if i == last:
            # Output layer: error is (target - actual) per neuron.
            errors = [valore[j] - neuron['output'] for j, neuron in enumerate(layer)]
        else:
            # Hidden layer: error is the delta-weighted sum over downstream neurons.
            errors = []
            for j in range(len(layer)):
                downstream = sum(n['pesi'][j] * n['delta'] for n in network[i + 1])
                errors.append(downstream)
        for neuron, err in zip(layer, errors):
            neuron['delta'] = err * transfer_derivative(neuron['output'])
# Weight update (one gradient-descent step)
def update_weights(network, row, Coeff):
    """Adjust every weight by Coeff * delta * input; the last weight is the bias.

    The first layer consumes the data row with its label stripped; deeper
    layers consume the previous layer's stored 'output' values.
    """
    for layer_idx, layer in enumerate(network):
        if layer_idx == 0:
            inputs = row[:-1]
        else:
            inputs = [n['output'] for n in network[layer_idx - 1]]
        for neuron in layer:
            step = Coeff * neuron['delta']
            for j, value in enumerate(inputs):
                neuron['pesi'][j] += step * value
            # Bias weight: input is implicitly 1.
            neuron['pesi'][-1] += step
# Network training loop
def train_network(network, train, Coeff, NumEpoc, n_outputs):
    """Run NumEpoc epochs of per-row (stochastic) gradient descent.

    Each row's last column is assumed to be an integer class label, which is
    one-hot encoded into the expected output vector.
    """
    for _ in range(NumEpoc):
        for row in train:
            forward_propagate(network, row)
            expected = [0] * n_outputs
            expected[int(row[-1])] = 1
            backward_propagate_error(network, expected)
            update_weights(network, row, Coeff)
# Network initialization
def initialize_network(n_inputs, n_hidden, n_outputs):
    """Build a two-layer network (hidden + output) with uniform random weights.

    Each neuron is a dict whose 'pesi' list holds fan_in weights plus a bias.
    """
    def make_layer(size, fan_in):
        # One extra weight per neuron for the bias term.
        return [{'pesi': [random() for _ in range(fan_in + 1)]} for _ in range(size)]
    return [make_layer(n_hidden, n_inputs), make_layer(n_outputs, n_hidden)]
# Predict
def predict(network, row):
    """Return the predicted class index for *row*: the position of the
    largest output-layer activation.

    Bug fix: the original signature was ``predict(row)`` and silently read a
    global ``network``, while the call site below uses
    ``predict(network, XT[i])`` — the network is now an explicit parameter,
    matching the caller.
    """
    outputs = forward_propagate(network, row)
    return outputs.index(max(outputs))
事情进展得很顺利,我用以下方法训练它:
# Training script: assemble the dataset, size the network from it, then train.
# NOTE(review): `np`, `XL` and `yL` are defined further down this page — if run
# as a single script, those definitions must execute before this block.
seed(1)
# Append the label column yL as column 2 of the feature matrix XL
# (assumes XL has exactly 2 feature columns) — TODO confirm.
dataset = np.insert(XL,2,yL,axis=1)
# Inputs = all columns except the label.
n_inputs = len(dataset[0]) - 1
# Number of classes = number of distinct integer labels in the last column.
n_outputs = len(set([int(row[-1]) for row in dataset]))
network = initialize_network(n_inputs, 3, n_outputs)
# Learning rate 0.2, 2000 epochs.
train_network(network, dataset, 0.2, 2000, n_outputs)
我可以用它来预测
x=predict(network, XT[i])
我的数据集是:
我用以下方法创建它:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_decision_regions
import matplotlib
matplotlib.rcParams['figure.figsize'] = [10, 10]
# data
# data
def donut_data(N, noise=0.8):
    """Sample N 2-D standard-normal points and label them donut-style.

    A point is labeled 1 when its *noise-perturbed* copy has squared radius
    below 0.3 (inner disk) or above 2 (outer region), and -1 otherwise.
    Returns the clean points X and the labels Y.
    """
    X = np.random.randn(N, 2)
    perturbed = X + np.random.uniform(-noise, noise, X.shape)
    radius_sq = perturbed[:, 0] * perturbed[:, 0] + perturbed[:, 1] * perturbed[:, 1]
    Y = np.where((radius_sq < 0.3) | (radius_sq > 2), 1, -1)
    return X, Y
def plot_data(X,Y,c1='b',c2='r', toplot=True):
    """Scatter-plot the two classes of X (Y==1 as 'x', Y==-1 as squares).

    When *toplot* is true, also set the y-limit, draw the legend and show
    the figure; otherwise leave the figure open for further drawing.
    """
    positive = Y == 1
    negative = Y == -1
    plt.scatter(X[positive, 0], X[positive, 1], c=c1, marker='x', label='1')
    plt.scatter(X[negative, 0], X[negative, 1], c=c2, marker='s', label='-1')
    if toplot:
        plt.ylim(-3.0)
        plt.legend()
        plt.show()
# Generate a training set (XL, yL) and an independent test set (XT, yT)
# from the same donut distribution, then visualize both.
N = 1000
noise = 0.5
XL, yL = donut_data(N, noise)
XT, yT = donut_data(N, noise)
plot_data(XL,yL,'b','r',True)   # training data: blue 'x' vs red squares
plot_data(XT,yT,'k','m', True)  # test data: black 'x' vs magenta squares
我想使用plot_decision_regions(XT,yT,network)来绘制这些区域,但我对这个程序有一个问题
网络是一个简单的列表,而不是一个类的对象,而 plot_decision_regions 所期望的正是后者。我怎样才能克服这个问题?我必须重写这个程序,把网络写成一个类的对象吗?
我认为plot_decision_regions()确实需要一个网络(或任何分类器)作为对象。这是因为在代码内部,它将.predict()方法应用于整个meshgrid,以了解神经网络如何对其进行分类
查看类似函数的代码:
相关问题 更多 >
编程相关推荐