-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathiris_schema_gen.py
More file actions
43 lines (35 loc) · 1.41 KB
/
iris_schema_gen.py
File metadata and controls
43 lines (35 loc) · 1.41 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
# This script generates the scoring and schema files
# necessary to operationalize the Iris Classification sample
# Init and run functions
from azure.ml.api.schema.dataTypes import DataTypes
from azure.ml.api.schema.sampleDefinition import SampleDefinition
from azure.ml.api.realtime.services import generate_schema
import pandas
# Prepare the web service definition by authoring
# init() and run() functions. Test the functions
# before deploying the web service.
def init():
    """Load the trained model from disk into the module-level ``model`` global.

    Called once by the scoring service before any call to run().
    """
    # sklearn.externals.joblib was deprecated in scikit-learn 0.21 and removed
    # in 0.23; prefer the standalone joblib package, falling back to the legacy
    # location so old environments keep working.
    try:
        import joblib
    except ImportError:
        from sklearn.externals import joblib
    # load the model file produced by the training script
    global model
    model = joblib.load('model.pkl')
def run(input_df):
    """Score a single scoring request.

    Parameters
    ----------
    input_df : pandas.DataFrame
        Rows of the four iris feature columns (sepal/petal length and width).

    Returns
    -------
    str
        JSON-encoded string of the predicted class label for the first row.
        NOTE(review): ``json.dumps(str(...))`` double-encodes the value
        (a JSON string containing the label); kept as-is for backward
        compatibility with existing service clients.
    """
    import json
    import numpy as np

    # Append 40 random "noise" features so the input matches the feature
    # layout the model was trained with (the training script does the same,
    # using the same fixed seed).
    n_noise = 40
    random_state = np.random.RandomState(0)
    n_samples, _ = input_df.shape  # feature count itself is not needed
    input_df = np.c_[input_df, random_state.randn(n_samples, n_noise)]
    pred = model.predict(input_df)
    return json.dumps(str(pred[0]))
# --- Smoke test and schema generation (runs at import time, as the Azure ML
# operationalization sample expects) ---

# Sample frame used both to exercise the functions and to define the
# web-service input schema.
df = pandas.DataFrame(data=[[3.0, 3.6, 1.3, 0.25]], columns=['sepal length', 'sepal width','petal length','petal width'])

# Load the model, then verify run() works end-to-end on a sample input
# before generating the schema.
init()
input1 = pandas.DataFrame([[3.0, 3.6, 1.3, 0.25]])
run(input1)

inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}
# The prepare statement writes the scoring file (main.py) and
# the schema file (service_schema.json) to the output folder.
generate_schema(run_func=run, inputs=inputs, filepath='service_schema.json')