1. Model Definition in Sequelize

This article shows how to define data models with Sequelize, including field attributes, validation rules, and table names, and how to sync a model to the database.
1. Define a data model.
2. To map a model to a table, use the define() method, for example:

var Sequelize = require('sequelize');
var sequelize = new Sequelize('database', 'username', 'password');

var UserRole = sequelize.define("UserRole", {
    id: { // per-field settings, such as the data type
        type: Sequelize.INTEGER, // int, 32-bit integer
        primaryKey: true,        // primary key
        autoIncrement: true,     // auto-increment
        comment: "primary key, auto-increment"
    },
    name: {
        type: Sequelize.STRING,
        comment: "user name"
    },
    account: {
        type: Sequelize.STRING,
        validate: {
            notEmpty: true // validation rules go here; see below
        }
    },
    pwd: {
        type: Sequelize.STRING,
        comment: "user password, hashed with MD5"
    }
}, {
    underscored: true,       // use snake_case column names
    tableName: 'user_roles'  // explicit table name
});

In define(), the first argument is the model name, which is mapped to the table.
The second argument defines the table's fields. Each field takes basic options such as type (the data type), comment (a description of the field), allowNull (whether NULL is allowed), and defaultValue (a default value); others will be described in detail as they come up. A field can also carry a validate: {} object holding validation rules; it supports many validators, for example checking that a value is a URL: validate: { isUrl: true }. A sketch with a few common validators follows below.
The third argument configures the table itself; for example, underscored: true switches column names to snake_case, and tableName sets the actual table name.
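A minimal sketch of a few built-in validators; the Profile model and its homepage and email fields are invented here purely for illustration and are not part of the UserRole example above:

var Profile = sequelize.define("Profile", {
    homepage: {
        type: Sequelize.STRING,
        validate: {
            isUrl: true // value must be a well-formed URL
        }
    },
    email: {
        type: Sequelize.STRING,
        validate: {
            isEmail: true, // value must be a valid e-mail address
            notEmpty: true // value must not be an empty string
        }
    }
});

If a value fails a validator, create() and save() reject with a validation error instead of writing the row.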

3. Once the model is defined, sync it to the database with the sync() method.

For example: UserRole.sync({force: true}). The force option drops the table first if it already exists, so it is recreated empty. After syncing, you can read from and write to the table.
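A minimal sketch of syncing and then inserting a row, assuming a promise-based Sequelize version; the field values are made up for illustration:

UserRole.sync({ force: true }) // drop and recreate the table
    .then(function () {
        // the table exists now; insert one row
        return UserRole.create({
            name: "alice",
            account: "alice001",
            pwd: "e10adc3949ba59abbe56e057f20f883e" // an MD5 digest, per the pwd comment
        });
    })
    .then(function (row) {
        console.log("inserted id:", row.id); // the auto-incremented primary key
    });

Calling sequelize.sync() instead syncs every model defined on the connection at once.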
