Cloud Server Featured Content

  • Saving a model (TensorFlow API)

        # Export the model
        # The model must be saved with the saved_model API
        print('Exporting trained model to', export_path)
        builder = tf.saved_model.builder.SavedModelBuilder(export_path)

        tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
        tensor_info_y = tf.saved_model.utils.build_tensor_info(y)

        # Define the inputs and outputs of the prediction signature
        # The keys of the inputs and outputs dicts are used as the index keys of
        # the model's input and output tensors
        # The input/output definition must match the custom inference script
        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'images': tensor_info_x},
                outputs={'scores': tensor_info_y},
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

        legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')

        builder.add_meta_graph_and_variables(
            # Set the tag to serve / tf.saved_model.tag_constants.SERVING
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict_images': prediction_signature,
            },
            legacy_init_op=legacy_init_op)

        builder.save()
        print('Done exporting!')
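    To sanity-check the export, the SavedModel can be loaded back under the same
    SERVING tag and the registered signature inspected. A minimal sketch
    (TensorFlow 1.x, assuming the export_path used above):

        import tensorflow as tf

        with tf.Session(graph=tf.Graph()) as sess:
            # Load the graph and variables saved under the SERVING tag
            meta_graph = tf.saved_model.loader.load(
                sess, [tf.saved_model.tag_constants.SERVING], export_path)
            # Look up the signature registered above as 'predict_images'
            signature = meta_graph.signature_def['predict_images']
            print(signature.inputs['images'].name, signature.outputs['scores'].name)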
  • Saving a model (Keras API)

        import tensorflow as tf
        from keras import backend as K

        # K.get_session().run(tf.global_variables_initializer())

        # Define the inputs and outputs of the prediction signature
        # The keys of the inputs and outputs dicts are used as the index keys of
        # the model's input and output tensors
        # The input/output definition must match the custom inference script
        predict_signature = tf.saved_model.signature_def_utils.predict_signature_def(
            inputs={"images": model.input},
            outputs={"scores": model.output}
        )

        # Define the save path
        builder = tf.saved_model.builder.SavedModelBuilder('./mnist_keras/')

        builder.add_meta_graph_and_variables(
            sess=K.get_session(),
            # Inference deployment requires the tf.saved_model.tag_constants.SERVING tag
            tags=[tf.saved_model.tag_constants.SERVING],
            # signature_def_map may contain only one item, or the corresponding key
            # must be tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
            signature_def_map={
                tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    predict_signature
            }
        )
        builder.save()
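    As above, the saved directory can be reloaded to confirm that the default
    serving signature resolves to the expected tensors. A rough sketch; batch is a
    hypothetical NumPy array shaped like model.input, e.g. (1, 28, 28):

        import tensorflow as tf

        with tf.Session(graph=tf.Graph()) as sess:
            meta_graph = tf.saved_model.loader.load(
                sess, [tf.saved_model.tag_constants.SERVING], './mnist_keras/')
            sig = meta_graph.signature_def[
                tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
            # Feed the 'images' input and fetch the 'scores' output by tensor name
            # (batch is a hypothetical input array, not defined in the doc)
            scores = sess.run(sig.outputs['scores'].name,
                              feed_dict={sig.inputs['images'].name: batch})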
  • Training a model (Keras API)

        import tensorflow as tf
        from keras.models import Sequential
        from keras.layers import Dense, Activation, Flatten, Dropout

        # Load the training dataset
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        print(x_train.shape)

        # Define the model network
        model = Sequential()
        model.add(Flatten(input_shape=(28, 28)))
        model.add(Dense(units=5120, activation='relu'))
        model.add(Dropout(0.2))
        model.add(Dense(units=10, activation='softmax'))

        # Define the optimizer, loss function, and so on
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.summary()

        # Train
        model.fit(x_train, y_train, epochs=2)
        # Evaluate
        model.evaluate(x_test, y_test)
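    A quick usage check of the trained model on a single test image, reusing the
    model and x_test defined above:

        import numpy as np

        probs = model.predict(x_test[:1])   # shape (1, 10), softmax scores
        print('predicted digit:', np.argmax(probs, axis=1)[0])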
  • Training and saving a model

        import pandas as pd
        import xgboost as xgb
        from sklearn.model_selection import train_test_split

        # Prepare training data and set parameters
        iris = pd.read_csv('/home/ma-user/work/iris.csv')
        X = iris.drop(['variety'], axis=1)
        # The 'variety' column holds string class names; multi:softmax needs
        # integer labels, so map them to category codes
        y = iris['variety'].astype('category').cat.codes
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                            random_state=1234565)
        params = {
            'booster': 'gbtree',
            'objective': 'multi:softmax',
            'num_class': 3,
            'gamma': 0.1,
            'max_depth': 6,
            'lambda': 2,
            'subsample': 0.7,
            'colsample_bytree': 0.7,
            'min_child_weight': 3,
            'silent': 1,
            'eta': 0.1,
            'seed': 1000,
            'nthread': 4,
        }
        plst = list(params.items())
        dtrain = xgb.DMatrix(X_train, y_train)
        num_rounds = 500
        model = xgb.train(plst, dtrain, num_rounds)
        model.save_model('/tmp/xgboost.m')

    Before training, download the iris.csv dataset, decompress it, and upload it to
    the Notebook local path /home/ma-user/work/. The iris.csv dataset can be
    downloaded from https://gist.github.com/netj/8836201. For how to upload local
    files, see "Uploading Local Files to a Notebook".

    After the model is saved, it must be uploaded to an OBS directory before it can
    be published. Publishing also requires the config.json configuration and the
    inference code customize_service.py. For how to write config.json, see the
    model configuration file description; for the inference code, see "Inference
    code" below.
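    As a sanity check before uploading to OBS, the saved booster can be reloaded
    and scored on the held-out split (a sketch reusing X_test and y_test from
    above):

        import xgboost as xgb
        from sklearn.metrics import accuracy_score

        booster = xgb.Booster(model_file='/tmp/xgboost.m')
        pred = booster.predict(xgb.DMatrix(X_test))   # multi:softmax returns class ids
        print('accuracy:', accuracy_score(y_test, pred.astype(int)))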
  • Inference code

    In the model inference file customize_service.py, add a child class that
    inherits from the parent class of the corresponding model type. For the parent
    class name and import statement of each model type, see Table 1.

        # coding:utf-8
        import collections
        import json

        import xgboost as xgb
        from model_service.python_model_service import XgSklServingBaseService


        class UserService(XgSklServingBaseService):

            # Preprocess the request data
            def _preprocess(self, data):
                list_data = []
                json_data = json.loads(data, object_pairs_hook=collections.OrderedDict)
                for element in json_data["data"]["req_data"]:
                    array = []
                    for each in element:
                        array.append(element[each])
                    list_data.append(array)
                return list_data

            # Predict
            def _inference(self, data):
                xg_model = xgb.Booster(model_file=self.model_path)
                pre_data = xgb.DMatrix(data)
                pre_result = xg_model.predict(pre_data)
                pre_result = pre_result.tolist()
                return pre_result

            # Postprocess the prediction result
            def _postprocess(self, data):
                resp_data = []
                for element in data:
                    resp_data.append({"predictresult": element})
                return resp_data
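    For reference, a hypothetical request body that _preprocess accepts, and the
    row layout it produces. The field names follow the iris.csv columns; the values
    are made up:

        import collections
        import json

        # One record per dict under data.req_data (hypothetical payload)
        request = json.dumps({
            "data": {
                "req_data": [
                    {"sepal.length": 5.1, "sepal.width": 3.5,
                     "petal.length": 1.4, "petal.width": 0.2}
                ]
            }
        })

        # _preprocess flattens each record's values into one feature row
        parsed = json.loads(request, object_pairs_hook=collections.OrderedDict)
        rows = [list(rec.values()) for rec in parsed["data"]["req_data"]]
        print(rows)   # [[5.1, 3.5, 1.4, 0.2]] -- what _inference feeds to xgb.DMatrix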