我目前正在开发一个open source ANN(为了好玩和学习的经验),我最近对我的代码做了一个我认为相对较小的改动,但出于某种原因,它使ANN的运行速度提高了16倍。(至少根据我的测试)
ANN/ANN5.py:(老ANN)
from random import uniform
class Neuron(object):
def __init__(self, parents=[]):
self.parents = [{
'neuron': parent,
'weight': uniform(-1, 1),
'slope': uniform(-1, 1),
} for parent in parents]
def calculate(self, increment=0):
self.output = sum([parent['neuron'].output * (parent['weight'] + increment * parent['slope']) for parent in self.parents]) > 0
def mutate(self, increment):
for parent in self.parents:
parent['weight'] += increment * parent['slope']
parent['slope'] = uniform(-1, 1)
def get_genome(self):
return [parent['weight'] for parent in self.parents]
def set_genome(self, value):
for i, parent in enumerate(self.parents):
parent['weight'] = value[i]
genome = property(get_genome, set_genome)
class NeuralNetwork(object):
def __init__(self, inputs, outputs, hidden, rows):
self.bias = Neuron()
self.neurons = []
for row in xrange(rows):
if row == 0:
self.neurons.append([Neuron(parents=[]) for input_ in xrange(inputs)])
elif row == rows - 1:
self.neurons.append([Neuron(parents=self.neurons[row - 1] + [self.bias]) for output in xrange(outputs)])
else:
self.neurons.append([Neuron(parents=self.neurons[row - 1] + [self.bias]) for column in xrange(hidden)])
self.bias.output = True
def calculate(self, inputs, increment=0):
for i, neuron_row in enumerate(self.neurons):
for j, neuron in enumerate(neuron_row):
if i == 0:
neuron.output = inputs[j]
else:
neuron.calculate(increment=increment)
return [neuron.output for neuron in self.neurons[-1]]
def mutate(self, increment):
for neuron_row in self.neurons:
for neuron in neuron_row:
neuron.mutate(increment=increment)
def get_genome(self):
genome = []
for neuron_row in self.neurons[1:]:
genome.append([neuron.genome for neuron in neuron_row])
return genome
def set_genome(self, value):
for i, neuron_row in enumerate(self.neurons[1:]):
for j, neuron in enumerate(neuron_row):
neuron.genome = value[i][j]
genome = property(get_genome, set_genome)
安/安·皮(新安)
from random import uniform
class Neuron(object):
def __init__(self, parents=[]):
self.parents = [{
'neuron': parent,
'weight': uniform(-1, 1),
'slope': uniform(-1, 1),
} for parent in parents]
def calculate(self, increment=0):
self.output = sum([parent['neuron'].output * (parent['weight'] + increment * parent['slope']) for parent in self.parents]) > 0
def mutate(self, increment):
for parent in self.parents:
parent['weight'] += increment * parent['slope']
parent['slope'] = uniform(-1, 1)
def get_genome(self):
return [parent['weight'] for parent in self.parents]
def set_genome(self, value):
for i, parent in enumerate(self.parents):
parent['weight'] = value[i]
genome = property(get_genome, set_genome)
class NeuralNetwork(object):
def __init__(self, inputs, outputs, hidden, rows):
self.bias = Neuron()
self.neurons = [[Neuron(parents=[]) for input_ in xrange(inputs)]]
for row in xrange(rows - 2):
self.neurons.append([Neuron(parents=self.neurons[-1] + [self.bias]) for output in xrange(outputs)])
self.neurons.append([Neuron(parents=self.neurons[-1] + [self.bias]) for output in xrange(outputs)])
self.bias.output = True
def calculate(self, inputs, increment=0):
for i, neuron_row in enumerate(self.neurons):
for j, neuron in enumerate(neuron_row):
if i == 0:
neuron.output = inputs[j]
else:
neuron.calculate(increment=increment)
return [neuron.output for neuron in self.neurons[-1]]
def mutate(self, increment):
for neuron_row in self.neurons:
for neuron in neuron_row:
neuron.mutate(increment=increment)
def get_genome(self):
genome = []
for neuron_row in self.neurons[1:]:
genome.append([neuron.genome for neuron in neuron_row])
return genome
def set_genome(self, value):
for i, neuron_row in enumerate(self.neurons[1:]):
for j, neuron in enumerate(neuron_row):
neuron.genome = value[i][j]
genome = property(get_genome, set_genome)
从ANN/ANN5.py到ANN的差异/安·皮地址:
- self.neurons = []
- for row in xrange(rows):
- if row == 0:
- self.neurons.append([Neuron(parents=[]) for input_ in xrange(inputs)])
- elif row == rows - 1:
- self.neurons.append([Neuron(parents=self.neurons[row - 1] + [self.bias]) for output in xrange(outputs)])
- else:
- self.neurons.append([Neuron(parents=self.neurons[row - 1] + [self.bias]) for column in xrange(hidden)])
+ self.neurons = [[Neuron(parents=[]) for input_ in xrange(inputs)]]
+ for row in xrange(rows - 2):
+ self.neurons.append([Neuron(parents=self.neurons[-1] + [self.bias]) for output in xrange(outputs)])
+ self.neurons.append([Neuron(parents=self.neurons[-1] + [self.bias]) for output in xrange(outputs)])
(都在神经网络的__init__
内)
你知道吗测试.py地址:
from random import randint
from time import time
from ANN.ANN import NeuralNetwork
# from ANN.ANN2 import NeuralNetwork as NeuralNetwork2
# from ANN.ANN3 import NeuralNetwork as NeuralNetwork3
# from ANN.ANN4 import NeuralNetwork as NeuralNetwork4
from ANN.ANN5 import NeuralNetwork as NeuralNetwork5
def test(NeuralNetwork=NeuralNetwork):
time_ = time()
ANNs = []
for i in xrange(10):
ANNs.append(NeuralNetwork(inputs=49, outputs=3, hidden=49, rows=5))
for i, ANN in enumerate(ANNs[:1]):
for j in xrange(11):
for k in xrange(len(ANNs) / 2):
for l in xrange(20):
ANN.calculate([randint(0, 1) for _ in xrange(49)], increment=j/10)
ANNs[k + len(ANNs)/2 * (i < len(ANNs)/2)].calculate([randint(0, 1) for _ in xrange(49)])
# print 'ANN {0} mutation {1:02d} opponent {2} turn {3:02d}'.format(i + 1, j + 1, k + 1, l + 1)
ANN.mutate(increment=randint(1, 100))
return time() - time_
if __name__ == '__main__':
print 'time: {0}'.format(test())
# print 'time 2: {0}'.format(test(NeuralNetwork2))
# print 'time 3: {0}'.format(test(NeuralNetwork3))
# print 'time 4: {0}'.format(test(NeuralNetwork4))
print 'time 5: {0}'.format(test(NeuralNetwork5))
我注释掉了ANN2、ANN3和ANN4,因为它们甚至是我存储的ANN的旧版本(仅在本地,没有一个在Github上),以便比较性能。目前我只担心ANN5.py和安·皮你知道吗
我之所以做for i, ANN in enumerate(ANNs[:1]):
而不是for i, ANN in enumerate(ANNs):
,是因为后者的测试花费了太长时间,我认为如果不重复10个ANN的过程,结果仍然是完全足够的(我偶尔会对所有10个进行测试以确保)
我上次跑步的时候测试.py这就是我得到的:
time: 0.454416036606
time 5: 8.02504611015
它总是给出一些接近的东西。你知道吗
我做了各种测试,比较安·皮到目前为止,他们在同样的情况下也做了同样的事情。我用基因组属性做了两个完全相同的人工神经网络,一个用神经网络类安·皮还有一个使用ANN5.py中的NeuralNetwork类,它们总是给我相同的结果,提供相同的输入。你知道吗
所以我的问题是,发生了什么?我知道我的问题不是很精确,但我真的不知道为什么会有如此巨大的性能差异。我希望旧的ANN(ANN5.py)只是在后台做一些非常低效的事情,因为我初始化ANN的方式,而新的ANN(安·皮)初始化是否正确,但是我担心新的ANN有一些完全缺失的东西,在我手动测试这两个的时候,由于某些原因没有出现/没有任何区别。你知道吗
我有点忘记了这个问题,以为之前发生了不好的事情,但最近我注意到正在生成的基因组相当短。就在那时,我注意到我在循环使用长度为3的xrange(outputs),而不是长度为49的xrange(hidden)。你知道吗
在我改变之后:
收件人:
它返回到与旧代码相似的速度,但至少它生成了一个正确大小的ANN。你知道吗
相关问题 更多 >
编程相关推荐