# keras_lstm.py -- experimental LSTM / phased-LSTM models
# (from a fork of ale3otik/NightPy; targets TensorFlow 1.x with standalone
# Keras, since tf.contrib is used below).
import numpy as np
import tensorflow as tf

import keras
from keras import optimizers, regularizers
from keras.activations import linear, tanh
from keras.engine.topology import Layer
from keras.layers import Dense, Input, LSTM, LSTMCell, RNN, concatenate
from keras.losses import mean_squared_error
from keras.models import Model
class PhasedLSTMCell(keras.layers.Layer):
    """Keras RNN-cell wrapper around a TF contrib phased-LSTM cell.

    The last input channel holds the timestamp, which
    tf.contrib.rnn.PhasedLSTMCell expects first in its (time, x) input tuple.
    """

    def __init__(self, units, cell, **kwargs):
        self.units = units
        # The wrapped cell carries an LSTMStateTuple (c, h): two state slots.
        self.state_size = (units, units)
        self.cell = cell
        super(PhasedLSTMCell, self).__init__(**kwargs)

    def build(self, input_shape):
        self.built = True

    def call(self, inputs, states):
        # Split the last column off as the timestamp channel.
        features, times = tf.split(inputs, (int(inputs.shape[-1]) - 1, 1), axis=-1)
        output, new_state = self.cell((times, features),
                                      tf.contrib.rnn.LSTMStateTuple(*states))
        return output, [new_state.c, new_state.h]
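
# A minimal usage sketch (added here, not part of the original file): the
# wrapper above is a Keras cell, so it is driven through keras.layers.RNN.
# The inner contrib cell and all shapes are illustrative assumptions.
def _demo_phased_cell(seq_len=20, n_features=4, units=16):
    inner = tf.contrib.rnn.PhasedLSTMCell(num_units=units)
    cell = PhasedLSTMCell(units=units, cell=inner)
    seq = Input(shape=(seq_len, n_features + 1))  # last channel = timestamps
    out = RNN(cell, return_sequences=True)(seq)
    return Model(seq, out)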
class MyLayer(Layer):
    """Runs tf.contrib.rnn.PhasedLSTMCell over a whole sequence.

    Expects inputs of shape (batch, time, features + 1), with the
    timestamps in the last channel.
    """

    def __init__(self, lstm_units=50, max_sequence_length=100):
        self.lstm_units = lstm_units
        self.max_sequence_length = max_sequence_length
        self.cell = tf.contrib.rnn.PhasedLSTMCell(num_units=self.lstm_units)
        super(MyLayer, self).__init__()

    def build(self, input_shape):
        self.shape = input_shape
        super(MyLayer, self).build(input_shape)

    def call(self, inputs, **kwargs):
        features, times = tf.split(
            inputs, [inputs.get_shape().as_list()[-1] - 1, 1], axis=-1)
        # PhasedLSTMCell takes its input as a (time, features) tuple.
        # Caveat: variables created by dynamic_rnn are not tracked as Keras
        # weights, so Keras optimizers will not reach them.
        outputs, state = tf.nn.dynamic_rnn(
            self.cell, inputs=(times, features), dtype=tf.float32)
        return outputs

    def compute_output_shape(self, input_shape):
        return (None, self.max_sequence_length, self.cell.output_size)
def keras_phased_lstm_model(max_sequence_length=None, input_shape=None, lstm_units=50):
    """Phased-LSTM regression model: (features, timestamps) -> per-step scalar."""
    features = Input(shape=(max_sequence_length, input_shape))
    timestamps = Input(shape=(max_sequence_length, 1))
    # Append the timestamp channel to the features; MyLayer splits it back
    # off internally before running the phased LSTM.
    lstm_layer = MyLayer(lstm_units, max_sequence_length)(
        concatenate([features, timestamps], axis=-1))
    dense = Dense(1)(lstm_layer)
    model = Model([features, timestamps], dense)
    model.compile(loss='mean_squared_error', optimizer=optimizers.Adam())
    return model
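
# A shape smoke test for the phased-LSTM model (added sketch; sample counts
# and dimensions are illustrative assumptions).
def _demo_phased_model(n_samples=8, seq_len=20, n_features=4):
    model = keras_phased_lstm_model(max_sequence_length=seq_len,
                                    input_shape=n_features)
    x = np.random.randn(n_samples, seq_len, n_features).astype('float32')
    # Monotonically increasing timestamps, one per step.
    t = np.cumsum(np.random.rand(n_samples, seq_len, 1), axis=1).astype('float32')
    return model.predict([x, t])  # -> (n_samples, seq_len, 1)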
def keras_lstm_model_1(max_sequence_length=None, input_shape=None, lstm_units=100, eps_reg=1e-2):
    """LSTM regressor that takes explicit initial states and also returns
    the final states, so sequences can be continued across calls."""
    inputs = Input(shape=(max_sequence_length, input_shape))
    # A Keras LSTM carries two state tensors (h, c), so the model needs two
    # state inputs, not one.
    initial_h = Input(shape=(lstm_units,))
    initial_c = Input(shape=(lstm_units,))
    lstm = LSTM(units=lstm_units, return_sequences=True, return_state=True)
    lstm_out, state_h, state_c = lstm(inputs, initial_state=[initial_h, initial_c])
    dense = Dense(1,
                  kernel_initializer='normal',
                  kernel_regularizer=regularizers.l2(eps_reg),
                  activation=linear)(lstm_out)
    model = Model([inputs, initial_h, initial_c], [dense, state_h, state_c])
    optimizer = optimizers.Adam(1e-3, clipvalue=10.0)
    # fake_loss (defined below) zeroes out the state outputs' contribution,
    # so only the dense prediction is trained on.
    model.compile(loss=['mean_squared_error', fake_loss, fake_loss],
                  optimizer=optimizer)
    return model
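
# Hedged usage sketch (added): start from zero (h, c) states and read the
# final states back from the extra outputs to continue the sequence later.
# All sizes are illustrative.
def _demo_lstm_model_1(n_samples=8, seq_len=20, n_features=4, units=100):
    model = keras_lstm_model_1(max_sequence_length=seq_len,
                               input_shape=n_features, lstm_units=units)
    x = np.random.randn(n_samples, seq_len, n_features).astype('float32')
    h0 = np.zeros((n_samples, units), dtype='float32')
    c0 = np.zeros((n_samples, units), dtype='float32')
    preds, h, c = model.predict([x, h0, c0])
    return preds, h, c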
def myloss(y_true, y_pred):
    return mean_squared_error(y_true, y_pred)


def fake_loss(y_true, y_pred):
    # Always-zero loss, used to ignore auxiliary outputs such as LSTM states.
    return mean_squared_error(0 * y_true, 0 * y_pred)
def reccurent_model(input_shape, lstm_units=120, eps_reg=1e-2):
    """Variable-length LSTM regressor whose (h, c) states are fed in as
    explicit inputs, so a long series can be processed chunk by chunk."""
    inputs = Input((None, input_shape))
    input_state1 = Input((lstm_units,))
    input_state2 = Input((lstm_units,))
    cell = LSTMCell(units=lstm_units, activation=tanh)
    layer = RNN(cell, return_sequences=True, return_state=True, name='rnn')
    outputs, state1, state2 = layer(inputs, initial_state=(input_state1, input_state2))
    densed = Dense(1,
                   kernel_initializer='normal',
                   kernel_regularizer=regularizers.l2(eps_reg),
                   activation=linear)(outputs)
    model = Model(inputs=[inputs, input_state1, input_state2],
                  outputs=densed)
    optimizer = optimizers.Adam(1e-3, clipvalue=10.0)
    model.compile(loss='mean_squared_error', optimizer=optimizer)
    return model
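
# Added sketch: since the time dimension is None, one compiled model accepts
# chunks of any length. Note the model returns only predictions, so the
# states here stay at zero for every chunk; true chunk-to-chunk carryover
# would require exposing state1/state2 as outputs, as keras_lstm_model_1
# does. Chunk lengths are illustrative assumptions.
def _demo_reccurent_chunks(n_samples=4, n_features=4, units=120):
    model = reccurent_model(input_shape=n_features, lstm_units=units)
    s1 = np.zeros((n_samples, units), dtype='float32')
    s2 = np.zeros((n_samples, units), dtype='float32')
    for chunk_len in (10, 25, 7):  # arbitrary chunk lengths
        x = np.random.randn(n_samples, chunk_len, n_features).astype('float32')
        preds = model.predict([x, s1, s2])
    return preds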