Inference.zip folder structure:
- requirements.txt
- Trained model file
- inference.py
- Other files and folders used

inference.py file format:
- Import statements
- One-time executable operations (e.g., loading the model, label encoding, etc.)
- def predict(input arguments as per the use case):
    - Data preprocessing
    - Inference
    - Return output based on the use case
*Do not change the naming convention for the entities marked in blue (in particular, the predict function name and signature).

Example 1: Tabular classification (scikit-learn)

inference.py:

import pickle

import numpy as np
import pandas as pd

# One-time operations: load the trained model and define the label names
model = pickle.load(open('model.pkl', 'rb'))
class_names = ['setosa', 'versicolor', 'virginica']

def predict(df):  # argument df is a pandas DataFrame
    # Keep only the feature columns, in the order the model was trained on
    df = df[["SepalLengthCm", "SepalWidthCm", "PetalLengthCm", "PetalWidthCm"]]
    numpy_array = df.to_numpy()
    # Predict
    predictions = model.predict(numpy_array)
    output = [class_names[class_predicted] for class_predicted in predictions]
    return output  # the return value is a list of strings
requirements.txt:

scikit-learn
numpy
pandas
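As a quick local sanity check (the platform normally calls predict for you; the file and column names here simply follow the example above), the function can be exercised like this:

import pandas as pd

from inference import predict  # assumes inference.py is importable from the current directory

# One sample row with the four expected feature columns
sample = pd.DataFrame([{
    "SepalLengthCm": 5.1,
    "SepalWidthCm": 3.5,
    "PetalLengthCm": 1.4,
    "PetalWidthCm": 0.2,
}])
print(predict(sample))  # e.g., ['setosa']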
Example 2: Text classification (TensorFlow/Keras)

inference.py:
from tensorflow.keras.models import load_model
import numpy as np

# One-time operations: define the label names and load the trained model
labels = ["positive", "negative", "neutral"]
model = load_model("model.h5")

def predict(input_text):
    # Preprocessing: convert input_text into the numeric tensor the model
    # expects (use-case specific, e.g., tokenization and padding)
    yhat = model.predict(input_text)
    yhat = np.array(yhat)
    indices = np.argmax(yhat, axis=1)             # highest-probability class per row
    scores = yhat[np.arange(len(yhat)), indices]  # confidence of the predicted class
    predicted_categories = [labels[i] for i in indices]
    output = predicted_categories[0]
    return output  # a single label string
requirements.txt:

tensorflow==2.4
numpy
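The preprocessing step above is deliberately left open, since it must mirror how the model was trained. As one sketch, assuming training used a Keras Tokenizer that was pickled alongside the model (tokenizer.pkl and MAX_LEN are assumptions, not part of the example above):

import pickle

from tensorflow.keras.preprocessing.sequence import pad_sequences

# Hypothetical: tokenizer fitted at training time and shipped inside Inference.zip
tokenizer = pickle.load(open("tokenizer.pkl", "rb"))
MAX_LEN = 100  # assumption: the sequence length used during training

def preprocess(input_text):
    # Map the raw string to token ids and pad to the training-time length
    seq = tokenizer.texts_to_sequences([input_text])
    return pad_sequences(seq, maxlen=MAX_LEN)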
Example 3: Object detection (TensorFlow SavedModel; image path in, annotated image out)

inference.py:
import os, sys
import cv2
import numpy as np
import tensorflow as tf

# One-time operations: load the model and any other custom-defined functions/variables
detection_model = tf.saved_model.load('saved_model')

def predict(img, output_directory):
    # Mandatory signature: first arg is the path of the image to read,
    # second arg is the directory in which to save the output image
    img = cv2.imread(img)
    # code for predictions
    # ........
    # ........
    output_file = os.path.join(output_directory, "image.jpg")
    cv2.imwrite(output_file, image_np_with_detections)
    return output_file  # path of the saved, annotated image
requirements.txt:

tensorflow==2.4.1
numpy
opencv-python-headless
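The elided prediction code depends on how the SavedModel was exported. For a model exported with the TensorFlow Object Detection API, a hedged sketch of that middle section could look like this (the dictionary keys below assume that API's output signature):

# Sketch only: convert the BGR image from cv2 to an RGB uint8 batch of one
image_np = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
input_tensor = tf.convert_to_tensor(image_np)[tf.newaxis, ...]
detections = detection_model(input_tensor)
# detections["detection_boxes"], detections["detection_classes"], and
# detections["detection_scores"] would then be drawn onto a copy of the
# image (e.g., with cv2.rectangle) to produce image_np_with_detections.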
Example 4: Image classification (TensorFlow/Keras with VGG16 preprocessing)

inference.py:
import numpy as np
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array
from tensorflow.keras.models import load_model

# One-time operations: define the class names and load the trained model
classes = ["person", "car", "truck"]
model = load_model("model.h5")

def predict(img_path):
    # Mandatory: the function must be named predict; it accepts a string
    # holding the image location
    image = load_img(img_path, target_size=(224, 224))
    image = img_to_array(image)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))  # add batch dimension
    image = preprocess_input(image)  # VGG16-style channel normalization
    yhat = model.predict(image)
    yhat = np.array(yhat)
    indices = np.argmax(yhat, axis=1)
    scores = yhat[np.arange(len(yhat)), indices]  # confidence of the predicted class
    predicted_categories = [classes[i] for i in indices]
    output = predicted_categories[0]
    return output  # mandatory: the return value is a string
requirements.txt:

tensorflow==2.4
numpy
pillow
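A quick local check, assuming a test image is available (sample.jpg is a hypothetical path; in production the platform supplies it):

from inference import predict  # assumes inference.py is importable from the current directory

print(predict("sample.jpg"))  # prints one of "person", "car", or "truck"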
If your inference code depends on NLTK data, fetch it during the one-time executable operations and point NLTK at a writable location such as /tmp:

import nltk

nltk.data.path.append("/tmp")
nltk.download(["punkt", "stopwords"], download_dir="/tmp")
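Once downloaded, those resources can be used as usual inside predict; a minimal sketch (clean is an illustrative helper, not part of the snippet above):

from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

stop_words = set(stopwords.words("english"))

def clean(text):
    # Tokenize with punkt and drop English stopwords
    return [t for t in word_tokenize(text) if t.lower() not in stop_words]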