LSTM loss stays unchanged after many iterations

Posted on 2024-10-01 19:16:43


Thanks for looking into this! :)

I'm trying to train an LSTM network to predict Google's stock price a few days ahead (y_size = 5 in the code below) from its price over the past 30 days. I trained the LSTM, but even after 200 iterations the loss barely decreases. I suspect the problem might lie in the feed command in the tf session, but I haven't been able to find anything wrong (perhaps because of my shallow knowledge). It looks to me as if the optimizer is being reset on every iteration of the tf session.
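For context on that suspicion: a tf.Session keeps variable state between sess.run calls, and only running the init op (tf.global_variables_initializer()) resets it. A minimal standalone TF 1.x sketch (separate from the code below) showing that state persists across repeated sess.run calls:

import tensorflow as tf

v = tf.Variable(0.0)
inc = tf.assign_add(v, 1.0)  # op that adds 1.0 to v each time it runs

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # v is reset to 0.0 only here
    for _ in range(3):
        print(sess.run(inc))  # prints 1.0, 2.0, 3.0 -- state persists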

If my understanding of how the optimizer is used is mistaken, I would be grateful for any advice on what might be wrong in the code.

Thanks for your help.

import matplotlib.pyplot as plt
import numpy as np
import csv
import tensorflow as tf
from tensorflow.contrib import rnn  # TF 1.x API (contrib was removed in TF 2.x)

# Data reader: pull the volume and daily price-change columns from the CSV
def read_data(fname):
    with open(fname) as f:
        data = list(csv.reader(f))
        d_mat = np.array(data)
        d_trsp = np.transpose(d_mat)        # one row per column of the CSV

        vol = d_trsp[6]                     # trading volume
        chg = [float(i) for i in d_trsp[7]] # daily price change

        return vol, chg

vol, training_data = read_data('GOOGL.csv')

training_data = training_data[0:300]  # keep only the first 300 days
print("Loading training data..")

# Split ratios for learning (note: defined here but never actually applied below)
ratio_train = 0.70
ratio_valid = 0.90 - ratio_train
ratio_test = 0.10  # fixed at 10% of dataset

# Parameters
learning_rate = 0.005
training_iters = 100
display_step = 1
x_size = 30      # input window: past 30 days
y_size = 5       # prediction target: next 5 days
n_hidden = 256   # LSTM units per layer

# Placeholders; None allows any batch size (the full batch of 265 windows is fed below)
x = tf.placeholder("float", [None, x_size])
y = tf.placeholder("float", [None, y_size])

weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, y_size]))
    }

biases = {
    'out': tf.Variable(tf.random_normal([y_size]))
    }

# Preprocess data into sliding windows: each x holds 30 consecutive days,
# each y the 5 days that immediately follow
def prod_data(data):
    x = []
    y = []

    n_windows = len(data) - x_size - y_size
    for i in range(n_windows):
        x.append(data[i:i + x_size])
        # target starts right after the input window (the original slice
        # began at i+x_size+1, which silently skipped one day)
        y.append(data[i + x_size:i + x_size + y_size])

    return x, y

a, b = prod_data(training_data)
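# Quick sanity check (not in the original post): 300 days - 30 (input window)
# - 5 (target window) = 265 windows, matching the batch fed to the graph
print(np.shape(a))  # expected: (265, 30)
print(np.shape(b))  # expected: (265, 5)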

# Define RNN architecture: a 2-layer LSTM over x_size one-feature time steps
def RNN(x, weights, biases):
    # Reshape to [batch, x_size], then split into x_size tensors of shape
    # [batch, 1], one per time step, as static_rnn expects
    x = tf.reshape(x, [-1, x_size])
    x = tf.split(x, x_size, 1)

    rnn_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(n_hidden), rnn.BasicLSTMCell(n_hidden)])
    outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)

    # Project the last time step's output to y_size values.
    # NOTE: the bias must be added to the matmul result; the original code had
    # tf.matmul(outputs[-1], weights['out'] + biases['out']), which adds the
    # bias to the weight matrix instead
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

pred = RNN(x, weights, biases)

# Loss and optimizer: mean squared error, minimized with RMSProp
cost = tf.reduce_mean((pred - y) ** 2)
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)
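# minimize() builds a single training op in the graph; running it repeatedly in
# a session updates the same variables in place -- nothing is rebuilt or reset
# between iterations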

# Initialization
init = tf.global_variables_initializer()

# Launch the TensorFlow graph
with tf.Session() as sess:
    sess.run(init)
    step = 0
    loss_total = 0
    loss_coll = []  # per-step loss history

    while step < training_iters:

        # One full-batch optimization step; the session's variable state
        # persists across sess.run calls
        _, loss, model_pred = sess.run([optimizer, cost, pred],
                                       feed_dict={x: a, y: b})

        # Update the running loss total and keep the history
        loss_total += loss
        loss_coll.append(loss)


        if (step+1) % display_step == 0:
            print("Loss at step " + str(step) + " = " + str(loss))
            loss_total = 0

        step += 1

    print("Optimization Finished!")
