R4: LSTM Fire Temperature Prediction

Published: 2025-06-23
import tensorflow as tf
import pandas     as pd
import numpy      as np

gpus = tf.config.list_physical_devices("GPU")
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)  # allocate GPU memory on demand
    tf.config.set_visible_devices([gpus[0]], "GPU")          # use only the first GPU
print(gpus)

df_1 = pd.read_csv("./woodpine2.csv")

import matplotlib.pyplot as plt
import seaborn as sns

plt.rcParams['savefig.dpi'] = 500  # resolution of saved figures
plt.rcParams['figure.dpi']  = 500  # on-screen figure resolution

fig, ax = plt.subplots(1, 3, constrained_layout=True, figsize=(14, 3))

sns.lineplot(data=df_1["Tem1"], ax=ax[0])
sns.lineplot(data=df_1["CO 1"], ax=ax[1])
sns.lineplot(data=df_1["Soot 1"], ax=ax[2])
plt.show()

dataFrame = df_1.iloc[:, 1:]   # drop the first column (a time index), keep the three feature channels
dataFrame

 

       Tem1      CO 1    Soot 1
0      25.0  0.000000  0.000000
1      25.0  0.000000  0.000000
2      25.0  0.000000  0.000000
3      25.0  0.000000  0.000000
4      25.0  0.000000  0.000000
...     ...       ...       ...
5943  295.0  0.000077  0.000496
5944  294.0  0.000077  0.000494
5945  292.0  0.000077  0.000491
5946  291.0  0.000076  0.000489
5947  290.0  0.000076  0.000487

[5948 rows × 3 columns]
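
Before windowing, it is worth a quick sanity check that the frame is complete (a minimal sketch; the column names follow the table above):

# confirm the shape and that no channel has missing values
print(dataFrame.shape)         # expected: (5948, 3)
print(dataFrame.isna().sum())  # expected: 0 for Tem1, CO 1 and Soot 1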

 

width_X = 8   # input window: 8 consecutive time steps
width_y = 1   # output window: predict 1 step ahead

X = []
y = []

in_start = 0

for _ in range(len(dataFrame)):
    in_end  = in_start + width_X
    out_end = in_end   + width_y

    if out_end < len(dataFrame):
        # flatten each 8×3 window into a 24-dim vector; it is reshaped back after scaling
        X_ = np.array(dataFrame.iloc[in_start:in_end, :]).reshape(width_X * 3)
        y_ = np.array(dataFrame.iloc[in_end:out_end, 0])   # target: the next Tem1 value

        X.append(X_)
        y.append(y_)

    in_start += 1

X = np.array(X)
y = np.array(y)

X.shape, y.shape
((5939, 24), (5939, 1))
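
The loop is easy to follow but slow in pure Python. An equivalent vectorized construction (a sketch, assuming NumPy >= 1.20 for sliding_window_view) produces the same arrays:

from numpy.lib.stride_tricks import sliding_window_view

data    = dataFrame.to_numpy()                                        # (5948, 3)
windows = sliding_window_view(data, (width_X, data.shape[1]))[:, 0]   # (5941, 8, 3)
X_alt   = windows[: len(data) - width_X - width_y].reshape(-1, width_X * 3)
y_alt   = data[width_X : len(data) - width_y, 0:1]                    # next-step Tem1

assert np.array_equal(X_alt, X) and np.array_equal(y_alt, y)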

 

from sklearn.preprocessing import MinMaxScaler

# normalize the flattened windows to the range [0, 1]
# (note: fitting on all 5939 samples lets test-set statistics influence the
#  scaler; a stricter variant is sketched after the train/test split below)
sc       = MinMaxScaler(feature_range=(0, 1))
X_scaled = sc.fit_transform(X)
X_scaled.shape
(5939, 24)

# reshape back to (samples, time steps, features) for the LSTM
X_scaled = X_scaled.reshape(len(X_scaled), width_X, 3)
X_scaled.shape

 

(5939, 8, 3)

# chronological split: the first 5000 windows train the model, the rest test it
X_train = np.array(X_scaled[:5000]).astype('float64')
y_train = np.array(y[:5000]).astype('float64')

X_test  = np.array(X_scaled[5000:]).astype('float64')
y_test  = np.array(y[5000:]).astype('float64')

X_train.shape
(5000, 8, 3)
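
Since the scaler above saw the test windows during fit_transform, a leak-free variant (a minimal sketch using the same 5000-row split) would fit on the training portion only:

sc_strict      = MinMaxScaler(feature_range=(0, 1))
X_train_strict = sc_strict.fit_transform(X[:5000]).reshape(-1, width_X, 3)  # fit on train only
X_test_strict  = sc_strict.transform(X[5000:]).reshape(-1, width_X, 3)      # reuse train min/max

With MinMax scaling on slowly drifting signals the difference is usually small, but the habit avoids overstating test performance.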

 

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM

# stacked (two-layer) LSTM; the first layer must return the full sequence
# so the second layer receives one vector per time step
model_lstm = Sequential()
model_lstm.add(LSTM(units=64, activation='relu', return_sequences=True,
                    input_shape=(X_train.shape[1], 3)))
model_lstm.add(LSTM(units=64, activation='relu'))

model_lstm.add(Dense(width_y))
# only the loss is tracked; no accuracy metric, since this is regression
model_lstm.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
                   loss='mean_squared_error')  # mean squared error loss

X_train.shape, y_train.shape
((5000, 8, 3), (5000, 1))
history_lstm = model_lstm.fit(X_train, y_train, 
                         batch_size=64, 
                         epochs=40, 
                         validation_data=(X_test, y_test),
                         validation_freq=1)
Epoch 1/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 8s 29ms/step - loss: 17407.9004 - val_loss: 8287.8789
Epoch 2/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 20ms/step - loss: 271.4785 - val_loss: 765.4532
Epoch 3/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 23ms/step - loss: 67.4131 - val_loss: 295.6400
Epoch 4/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 22ms/step - loss: 34.1102 - val_loss: 171.0410
Epoch 5/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 25ms/step - loss: 15.5997 - val_loss: 160.7888
Epoch 6/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 23ms/step - loss: 10.5981 - val_loss: 54.3319
Epoch 7/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 21ms/step - loss: 8.8930 - val_loss: 90.8075
Epoch 8/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 1s 17ms/step - loss: 8.3159 - val_loss: 59.9741
Epoch 9/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 15ms/step - loss: 7.6032 - val_loss: 135.2851
Epoch 10/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 1s 17ms/step - loss: 9.3867 - val_loss: 97.8612
Epoch 11/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 19ms/step - loss: 9.0518 - val_loss: 126.9608
Epoch 12/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 19ms/step - loss: 8.0520 - val_loss: 86.8619
Epoch 13/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 20ms/step - loss: 7.3589 - val_loss: 99.1821
Epoch 14/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 19ms/step - loss: 9.5558 - val_loss: 88.7941
Epoch 15/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 20ms/step - loss: 7.5996 - val_loss: 69.6262
Epoch 16/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 20ms/step - loss: 7.3958 - val_loss: 79.1977
Epoch 17/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 21ms/step - loss: 7.4730 - val_loss: 70.9978
Epoch 18/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 21ms/step - loss: 8.0841 - val_loss: 75.3642
Epoch 19/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 21ms/step - loss: 6.9333 - val_loss: 63.2329
Epoch 20/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 22ms/step - loss: 8.4154 - val_loss: 63.0497
Epoch 21/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 19ms/step - loss: 11.4677 - val_loss: 133.5659
Epoch 22/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 17ms/step - loss: 10.6824 - val_loss: 130.6112
Epoch 23/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 20ms/step - loss: 7.0621 - val_loss: 74.8764
Epoch 24/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 20ms/step - loss: 7.3307 - val_loss: 93.0864
Epoch 25/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 23ms/step - loss: 6.2863 - val_loss: 81.8337
Epoch 26/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 3s 22ms/step - loss: 6.0637 - val_loss: 93.4994
Epoch 27/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 21ms/step - loss: 7.9105 - val_loss: 67.1826
Epoch 28/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 20ms/step - loss: 8.9972 - val_loss: 97.9051
Epoch 29/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 20ms/step - loss: 7.0677 - val_loss: 60.4530
Epoch 30/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 22ms/step - loss: 6.8877 - val_loss: 61.1201
Epoch 31/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 22ms/step - loss: 7.2029 - val_loss: 59.1220
Epoch 32/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 21ms/step - loss: 7.4075 - val_loss: 53.2644
Epoch 33/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 1s 17ms/step - loss: 7.1988 - val_loss: 50.9414
Epoch 34/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 19ms/step - loss: 7.0997 - val_loss: 72.2796
Epoch 35/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 1s 18ms/step - loss: 8.1966 - val_loss: 61.2261
Epoch 36/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 1s 17ms/step - loss: 7.1526 - val_loss: 58.8710
Epoch 37/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 3s 22ms/step - loss: 8.1424 - val_loss: 93.2947
Epoch 38/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 20ms/step - loss: 6.9144 - val_loss: 82.5569
Epoch 39/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 20ms/step - loss: 7.3446 - val_loss: 91.3829
Epoch 40/40
79/79 ━━━━━━━━━━━━━━━━━━━━ 2s 19ms/step - loss: 7.6978 - val_loss: 58.8386
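
The logs show val_loss oscillating roughly between 50 and 135 after epoch 6, with the best value (about 50.94) at epoch 33 rather than at the end. A common refinement (a sketch; 'best_lstm.keras' is a hypothetical filename, and the .keras format assumes a recent TensorFlow) is to keep the best weights via callbacks:

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    # stop once val_loss has not improved for 10 epochs, rolling back to the best weights
    EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True),
    # also persist the best model to disk ('best_lstm.keras' is a hypothetical path)
    ModelCheckpoint('best_lstm.keras', monitor='val_loss', save_best_only=True),
]

history_lstm = model_lstm.fit(X_train, y_train, batch_size=64, epochs=40,
                              validation_data=(X_test, y_test), callbacks=callbacks)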
# configure matplotlib to render CJK text (SimHei) and the minus sign correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

plt.figure(figsize=(5, 3), dpi=120)

plt.plot(history_lstm.history['loss']    , label='LSTM Training Loss')
plt.plot(history_lstm.history['val_loss'], label='LSTM Validation Loss')

plt.title('Training and Validation Loss')
plt.legend()
plt.show()

 

predicted_y_lstm = model_lstm.predict(X_test)   # run the test set through the model

y_test_one           = [i[0] for i in y_test]
predicted_y_lstm_one = [i[0] for i in predicted_y_lstm]

plt.figure(figsize=(5, 3), dpi=120)
# compare the true and predicted curves on the first 1000 test samples
plt.plot(y_test_one[:1000], color='red', label='Ground truth')
plt.plot(predicted_y_lstm_one[:1000], color='blue', label='Prediction')

plt.title('LSTM Prediction vs. Ground Truth')
plt.xlabel('Test sample index')
plt.ylabel('Tem1')
plt.legend()
plt.show()

 

30/30 ━━━━━━━━━━━━━━━━━━━━ 1s 24ms/step

from sklearn import metrics
"""
RMSE : root mean squared error -----> the square root of the MSE
R2   : coefficient of determination; loosely, how much of the variance the fit explains
"""
# caution: r2_score is not symmetric; sklearn's convention is r2_score(y_true, y_pred),
# and reversing the arguments (as below) changes the reported value
RMSE_lstm  = metrics.mean_squared_error(predicted_y_lstm, y_test)**0.5
R2_lstm    = metrics.r2_score(predicted_y_lstm, y_test)

print('RMSE: %.5f' % RMSE_lstm)
print('R2: %.5f' % R2_lstm)

RMSE: 7.67063
R2: 0.82748
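
For completeness, this is how the fitted pipeline would produce a single forecast for the step after the end of the series (a minimal sketch reusing the fitted scaler sc and the flatten-then-reshape convention above):

# take the last 8 rows, flatten to 24 features, scale, reshape for the LSTM
last_window = dataFrame.iloc[-width_X:, :].to_numpy().reshape(1, width_X * 3)
last_scaled = sc.transform(last_window).reshape(1, width_X, 3)
next_tem1   = model_lstm.predict(last_scaled)[0, 0]
print('Predicted next Tem1: %.2f' % next_tem1)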

Takeaway: learned how to use an LSTM for time-series forecasting, here predicting fire temperature one step ahead.

