LSTM prediction (test version)


#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: kailkaka
# date: 2019-3-3

import tensorflow as tf
from tensorflow import keras
import numpy as np
import warnings
import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import matplotlib
matplotlib.use('qt4agg')
# import matplotlib.pyplot as plt
# plt.switch_backend('agg')
matplotlib.rcParams['font.sans-serif'] = ['SimHei']   # allow CJK characters in plots
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['axes.unicode_minus'] = False
import sys

try:
    parameter = sys.argv[1]
except IndexError:
    print "Please Enter parameter..."
    exit(1)

warnings.filterwarnings("ignore")
reload(sys)
sys.setdefaultencoding('UTF-8')

class ZcSummary:
    def read_csv(self):
        # look up the section matching the command-line parameter in the config file
        with open('config', 'r') as fileline:
            list1 = fileline.readlines()
        for i in range(0, len(list1)):
            if list1[i] != '\n':
                list1[i] = list1[i].rstrip('\n')
                if list1[i].rstrip(':') == parameter:
                    print "-", i
                    break
        try:
            datafile = list1[i + 1].split("=")[1].replace('\'', '').rstrip('\n')
            attribute = list1[i + 2].split("=")[1]
            tag = list1[i + 3].split("=")[1]
        except:
            print "Please check parameter..., system does not support it"
            exit(2)
        v_dataframe = pd.read_csv(datafile, encoding='UTF-8')
        # v_dataframe = v_dataframe.reindex(np.random.permutation(v_dataframe.index))
        return v_dataframe, attribute, tag

    def preprocess_features(self, california_housing_dataframe, attribute, tag):
        # hold out the last 1000 rows as a test set
        df = california_housing_dataframe
        train = df[0:df.shape[0] - 1000]
        test = df[df.shape[0] - 1000:]
        # strip quotes and brackets from the attribute/tag strings read from the config
        predictors = attribute.rstrip('\n').replace('\'', '').replace('[', '').replace(']', '')
        predictors = predictors.split(',')
        tag = tag.rstrip('\n').replace('\'', '').replace('[', '').replace(']', '')
        X_train = train[predictors]
        y_train = train[tag]
        X_test = test[predictors]
        y_test = test[tag]
        num = len(predictors)
        # standardize targets and features
        test_y_disorder = preprocessing.scale(y_test).reshape(-1, 1)
        train_y_disorder = preprocessing.scale(y_train).reshape(-1, 1)
        ss_x = preprocessing.StandardScaler()
        train_x_disorder = ss_x.fit_transform(X_train)
        test_x_disorder = ss_x.transform(X_test)
        return train_x_disorder, train_y_disorder, test_x_disorder, test_y_disorder, num

    def main(self):
        california_housing_dataframe, attribute, tag = self.read_csv()
        X, y, X_test, y_test, num = self.preprocess_features(california_housing_dataframe, attribute, tag)
        return X, y, X_test, y_test, num
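read_csv expects a plain-text file named config in the working directory: a section name ending with a colon, followed by datafile, attribute, and tag assignments. A hypothetical example of the format the parser above would accept (the file name and column names here are placeholders, not from the original):

house:
datafile='housing.csv'
attribute=['CRIM','RM','AGE','TAX']
tag=['MEDV']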

t = ZcSummary()
train_x, train_y, X_test, y_test, num = t.main()

BATCH_START = 0    # index used when building batch data
TIME_STEPS = 10    # time_steps for backpropagation through time
BATCH_SIZE = 30
INPUT_SIZE = num   # input size (number of predictor columns)
OUTPUT_SIZE = 1    # output size
CELL_SIZE = 10     # size of the RNN hidden unit
LR = 0.006         # learning rate

def get_batch_boston():
    global train_x, train_y, BATCH_START, TIME_STEPS
    x_part1 = train_x[BATCH_START:BATCH_START + TIME_STEPS * BATCH_SIZE]
    y_part1 = train_y[BATCH_START:BATCH_START + TIME_STEPS * BATCH_SIZE]
    # print(u'time period =', BATCH_START, BATCH_START + TIME_STEPS * BATCH_SIZE)
    seq = x_part1.reshape((BATCH_SIZE, TIME_STEPS, INPUT_SIZE))
    res = y_part1.reshape((BATCH_SIZE, TIME_STEPS, 1))
    BATCH_START += TIME_STEPS
    # returned seq and res have shape (batch, step, input)
    return [seq, res]

def get_batch():
    global BATCH_START, TIME_STEPS
    # xs shape (50batch, 20steps)
    xs = np.arange(BATCH_START, BATCH_START + TIME_STEPS * BATCH_SIZE).reshape((BATCH_SIZE, TIME_STEPS)) / (10 * np.pi)
    print('xs.shape=', xs.shape)
    seq = np.sin(xs)
    res = np.cos(xs)
    BATCH_START += TIME_STEPS
    # import matplotlib.pyplot as plt
    # plt.plot(xs[0, :], res[0, :], 'r', xs[0, :], seq[0, :], 'b--')
    # plt.show()
    # print(u'before adding a dimension:', seq.shape)
    # print(seq[:2])
    # print(u'after adding a dimension:', seq[:, :, np.newaxis].shape)
    # print(seq[:2])
    # returned seq, res and xs have shape (batch, step, input)
    # np.newaxis adds a third dimension, which will be used to store the state of the previous batch of samples
    return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]
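Run on its own, a minimal sketch of the batching logic above makes the expected shapes concrete (the synthetic train_x/train_y and the choice of 5 features are stand-ins, not from the original):

import numpy as np

BATCH_START, TIME_STEPS, BATCH_SIZE, INPUT_SIZE = 0, 10, 30, 5
train_x = np.random.randn(6000, INPUT_SIZE)   # synthetic stand-in for the scaled features
train_y = np.random.randn(6000, 1)            # synthetic stand-in for the scaled targets

x_part1 = train_x[BATCH_START:BATCH_START + TIME_STEPS * BATCH_SIZE]
y_part1 = train_y[BATCH_START:BATCH_START + TIME_STEPS * BATCH_SIZE]
seq = x_part1.reshape((BATCH_SIZE, TIME_STEPS, INPUT_SIZE))
res = y_part1.reshape((BATCH_SIZE, TIME_STEPS, 1))
print(seq.shape)   # (30, 10, 5)
print(res.shape)   # (30, 10, 1)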

class LSTMRNN(object):
    def __init__(self, n_steps, input_size, output_size, cell_size, batch_size):

        '''
        :param n_steps: how many time steps each batch of data contains
        :param input_size: the dimension of the input data
        :param output_size: the dimension of the output data; for something like a price curve it should be 1
        :param cell_size: the size of the cell
        :param batch_size: the number of training samples per batch
        '''

        self.n_steps = n_steps
        self.input_size = input_size
        self.output_size = output_size
        self.cell_size = cell_size
        self.batch_size = batch_size
        with tf.name_scope('inputs'):
            self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size], name='xs')   # xs has three dimensions
            self.ys = tf.placeholder(tf.float32, [None, n_steps, output_size], name='ys')  # ys has three dimensions
        with tf.variable_scope('in_hidden'):
            self.add_input_layer()
        with tf.variable_scope('LSTM_cell'):
            self.add_cell()
        with tf.variable_scope('out_hidden'):
            self.add_output_layer()
        with tf.name_scope('cost'):
            self.compute_cost()
        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)

    # add an input layer
    def add_input_layer(self):
        # l_in_x: (batch * n_steps, in_size) -- string this batch's samples onto one timeline:
        # 50 samples per batch, each with 20 time steps, gives a timeline of length 1000
        l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D')  # -1 means any number of rows
        # Ws (in_size, cell_size)
        Ws_in = self._weight_variable([self.input_size, self.cell_size])
        # bs (cell_size, )
        bs_in = self._bias_variable([self.cell_size, ])
        # l_in_y = (batch * n_steps, cell_size)
        with tf.name_scope('Wx_plus_b'):
            l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in
        # reshape l_in_y ==> (batch, n_steps, cell_size)
        self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size], name='2_3D')

    # state overlay layer
    def add_cell(self):
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True)
        with tf.name_scope('initial_state'):
            self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
        # time_major=False means the first dimension is the batch, not time
        self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(
            lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False)

    # add an output layer
    def add_output_layer(self):
        # shape = (batch * steps, cell_size)
        l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D')
        Ws_out = self._weight_variable([self.cell_size, self.output_size])
        bs_out = self._bias_variable([self.output_size, ])
        # shape = (batch * steps, output_size)
        with tf.name_scope('Wx_plus_b'):
            self.pred = tf.matmul(l_out_x, Ws_out) + bs_out  # prediction result

    def compute_cost(self):
        losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [tf.reshape(self.pred, [-1], name='reshape_pred')],
            [tf.reshape(self.ys, [-1], name='reshape_target')],
            [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
            average_across_timesteps=True,
            softmax_loss_function=self.ms_error,
            name='losses')
        with tf.name_scope('average_cost'):
            self.cost = tf.div(
                tf.reduce_sum(losses, name='losses_sum'),
                self.batch_size,
                name='average_cost')
            tf.summary.scalar('cost', self.cost)

    def ms_error(self, labels, logits):
        # the parameter names follow what tf.contrib.legacy_seq2seq.sequence_loss_by_example
        # passes to softmax_loss_function
        return tf.square(tf.subtract(labels, logits))

    def _weight_variable(self, shape, name='weights'):
        initializer = tf.random_normal_initializer(mean=0., stddev=1.)
        return tf.get_variable(shape=shape, initializer=initializer, name=name)

    def _bias_variable(self, shape, name='biases'):
        initializer = tf.constant_initializer(0.1)
        return tf.get_variable(name=name, shape=shape, initializer=initializer)
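Since ms_error returns element-wise squared differences and the weights are all ones, the cost above should reduce to a sum of squared errors divided by the batch size. A minimal numpy sketch of that reduction (illustrative only, using random stand-ins for the flattened predictions and targets, and assuming the legacy_seq2seq weighting behaves as just described):

import numpy as np

batch_size, n_steps = 30, 10
pred = np.random.randn(batch_size * n_steps)   # stand-in for the flattened predictions
ys = np.random.randn(batch_size * n_steps)     # stand-in for the flattened targets
cost = np.sum((pred - ys) ** 2) / batch_size   # mirrors compute_cost's reduction
print(cost)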

if __name__ == '__main__':
    seq, res = get_batch_boston()
    model = LSTMRNN(TIME_STEPS, INPUT_SIZE, OUTPUT_SIZE, CELL_SIZE, BATCH_SIZE)
    sess = tf.Session()
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("houseprice", sess.graph)
    # tf.initialize_all_variables() is no longer valid from 2017-03-02; if using tensorflow >= 0.12:
    sess.run(tf.global_variables_initializer())
    # relocate to the local dir and run this line to view it in Chrome (http://0.0.0.0:6006/):
    # $ tensorboard --logdir='logs'
    for j in range(1000):  # train
        pred_res = None
        for i in range(20):  # divide the whole data set into 20 time periods
            seq, res = get_batch_boston()
            if i == 0:
                feed_dict = {
                    model.xs: seq,
                    model.ys: res,
                    # create initial state
                }
            else:
                feed_dict = {
                    model.xs: seq,
                    model.ys: res,
                    model.cell_init_state: state  # use last state as the initial state for this run
                }
            _, cost, state, pred = sess.run(
                [model.train_op, model.cost, model.cell_final_state, model.pred],
                feed_dict=feed_dict)
            pred_res = pred
        result = sess.run(merged, feed_dict)
        writer.add_summary(result, j)
        print('{0} loss= '.format(j), round(cost, 4))
        BATCH_START = 0  # start over from the beginning of the data

    # drawing
    # print(u'result:', pred_res.shape)
    # keep consistent with the data used in the last training pass
    train_y = train_y[190:490]
    # print(u'actual:', train_y.flatten().shape)
    r_size = BATCH_SIZE * TIME_STEPS
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(20, 3))  # dpi sets the resolution of the figure in pixels per inch; the default is 80
    axes = fig.add_subplot(1, 1, 1)
    # for convenience, only the last 100 rows of data are displayed
    line1, = axes.plot(range(100), pred.flatten()[-100:], 'b--', label='calculated')
    # line2, = axes.plot(range(len(gbr_pridict)), gbr_pridict, 'r--', label='optimized')
    line3, = axes.plot(range(100), train_y.flatten()[-100:], 'r', label='actual')
    axes.grid()
    fig.tight_layout()
    # plt.legend(handles=[line1, line2, line3])
    plt.legend(handles=[line1, line3])
    plt.title(u'recurrent neural network')
    plt.savefig('houseprice.png')
    plt.show()
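Assuming the script is saved as lstm_predict.py (the file name is a placeholder) next to the config file, it would be run with the config section name as its single argument, e.g. python lstm_predict.py house. It then writes TensorBoard summaries to the houseprice directory and saves the final comparison plot to houseprice.png.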
