Train and deploy on Kubeflow from Notebooks
Build, train, and deploy an XGBoost model on Kubernetes with Kubeflow, directly from a Jupyter notebook.
- Train and deploy on Kubeflow from Notebooks
- Prerequisites
- Install Required Libraries
- Code to train and predict
- Wrap Training and Prediction in a class
- Train your Model Locally
- Predict locally
- Use Kubeflow Fairing to Launch a K8s Job to train your model
- Use Kubeflow fairing to build the docker image
- Launch the K8s Job
- Deploy the trained model to Kubeflow for predictions
- Send an inference request to the prediction server
- Clean up the prediction endpoint
- Track Models and Artifacts
- Create a pipeline to train your model
Train and deploy on Kubeflow from Notebooks
This notebook shows you how to use Kubeflow to build, train, and deploy models on Kubernetes. It walks you through the following steps:
- Building an XGBoost model inside a notebook
- Training the model inside the notebook
- Performing inference using the model inside the notebook
- Using Kubeflow Fairing to launch training jobs on Kubernetes
- Using Kubeflow Fairing to build and deploy a model using Seldon Core
- Using Kubeflow metadata to record metadata about your models
- Using Kubeflow Pipelines to build a pipeline to train your model
Prerequisites
- This notebook assumes you are running inside a Kubeflow 0.6 deployment on GKE, created by following the GKE instructions
- If you are running somewhere other than GKE, you will need to modify the notebook to use a different Docker registry, or else configure Kubeflow to work with GCR (see the sketch below for the registry override)
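For example, you could point the registry variable used later in this notebook at any registry your cluster can push to and pull from. A hypothetical sketch; the registry host and repository below are placeholders, not values from this example:

# Hypothetical: use a non-GCR registry instead of GCR. The host and
# repository are placeholders -- substitute a registry you control.
DOCKER_REGISTRY = "registry.example.com/my-team/fairing-job"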
Install Required Libraries
import os
from oauth2client.client import GoogleCredentials
# Fetch GCP application default credentials so the notebook can talk to GCP services
credentials = GoogleCredentials.get_application_default()
# notebook_setup is a helper module shipped alongside this example that
# prepares the notebook environment
import notebook_setup
notebook_setup.notebook_setup()
- Import the python libraries we will use
- We add the comment "# fairing:include-cell" to tell the Kubeflow fairing preprocessor to keep this cell when converting to python code later
# fairing:include-cell
import fire
import joblib
import logging
import nbconvert
import os
import pathlib
import sys
from pathlib import Path
import pandas as pd
import pprint
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from xgboost import XGBRegressor
from importlib import reload
from sklearn.datasets import make_regression
from kubeflow.metadata import metadata
from datetime import datetime
import retrying
import urllib3
# Imports not to be included in the built docker image
import util
import kfp
import kfp.components as comp
import kfp.gcp as gcp
import kfp.dsl as dsl
import kfp.compiler as compiler
from kubernetes import client as k8s_client
from kubeflow import fairing
from kubeflow.fairing.builders import append
from kubeflow.fairing.deployers import job
from kubeflow.fairing.preprocessors.converted_notebook import ConvertNotebookPreprocessorWithFire
Code to train and predict
# fairing:include-cell
def read_synthetic_input(test_size=0.25):
"""generate synthetic data and split it into train and test."""
# generate regression dataset
X, y = make_regression(n_samples=200, n_features=5, noise=0.1)
train_X, test_X, train_y, test_y = train_test_split(X,
y,
test_size=test_size,
shuffle=False)
imputer = SimpleImputer()
train_X = imputer.fit_transform(train_X)
test_X = imputer.transform(test_X)
return (train_X, train_y), (test_X, test_y)
# fairing:include-cell
def train_model(train_X,
train_y,
test_X,
test_y,
n_estimators,
learning_rate):
"""Train the model using XGBRegressor."""
model = XGBRegressor(n_estimators=n_estimators, learning_rate=learning_rate)
model.fit(train_X,
train_y,
early_stopping_rounds=40,
eval_set=[(test_X, test_y)])
print("Best RMSE on eval: %.2f with %d rounds",
model.best_score,
model.best_iteration+1)
return model
def eval_model(model, test_X, test_y):
"""Evaluate the model performance."""
predictions = model.predict(test_X)
    mae = mean_absolute_error(test_y, predictions)
logging.info("mean_absolute_error=%.2f", mae)
return mae
def save_model(model, model_file):
"""Save XGBoost model for serving."""
joblib.dump(model, model_file)
logging.info("Model export success: %s", model_file)
def create_workspace():
    METADATA_STORE_HOST = "metadata-grpc-service.kubeflow"  # default DNS name of the Kubeflow Metadata gRPC service.
METADATA_STORE_PORT = 8080
return metadata.Workspace(
store=metadata.Store(grpc_host=METADATA_STORE_HOST, grpc_port=METADATA_STORE_PORT),
name="xgboost-synthetic",
description="workspace for xgboost-synthetic artifacts and executions")
Wrap Training and Prediction in a class
# fairing:include-cell
class ModelServe(object):
def __init__(self, model_file=None):
self.n_estimators = 50
self.learning_rate = 0.1
if not model_file:
if "MODEL_FILE" in os.environ:
print("model_file not supplied; checking environment variable")
model_file = os.getenv("MODEL_FILE")
else:
print("model_file not supplied; using the default")
model_file = "mockup-model.dat"
self.model_file = model_file
print("model_file={0}".format(self.model_file))
self.model = None
self._workspace = None
self.exec = self.create_execution()
def train(self):
(train_X, train_y), (test_X, test_y) = read_synthetic_input()
# Here we use Kubeflow's metadata library to record information
# about the training run to Kubeflow's metadata store.
self.exec.log_input(metadata.DataSet(
description="xgboost synthetic data",
name="synthetic-data",
owner="someone@kubeflow.org",
uri="file://path/to/dataset",
version="v1.0.0"))
model = train_model(train_X,
train_y,
test_X,
test_y,
self.n_estimators,
self.learning_rate)
mae = eval_model(model, test_X, test_y)
# Here we log metrics about the model to Kubeflow's metadata store.
self.exec.log_output(metadata.Metrics(
name="xgboost-synthetic-traing-eval",
owner="someone@kubeflow.org",
description="training evaluation for xgboost synthetic",
uri="gcs://path/to/metrics",
metrics_type=metadata.Metrics.VALIDATION,
values={"mean_absolute_error": mae}))
save_model(model, self.model_file)
self.exec.log_output(metadata.Model(
name="housing-price-model",
description="housing price prediction model using synthetic data",
owner="someone@kubeflow.org",
uri=self.model_file,
model_type="linear_regression",
training_framework={
"name": "xgboost",
"version": "0.9.0"
},
hyperparameters={
"learning_rate": self.learning_rate,
"n_estimators": self.n_estimators
},
version=datetime.utcnow().isoformat("T")))
def predict(self, X, feature_names):
"""Predict using the model for given ndarray.
The predict signature should match the syntax expected by Seldon Core
https://github.com/SeldonIO/seldon-core so that we can use
Seldon h to wrap it a model server and deploy it on Kubernetes
"""
if not self.model:
self.model = joblib.load(self.model_file)
# Do any preprocessing
prediction = self.model.predict(data=X)
# Do any postprocessing
return [[prediction.item(0), prediction.item(1)]]
@property
def workspace(self):
if not self._workspace:
self._workspace = create_workspace()
return self._workspace
def create_execution(self):
r = metadata.Run(
workspace=self.workspace,
name="xgboost-synthetic-faring-run" + datetime.utcnow().isoformat("T"),
description="a notebook run")
return metadata.Execution(
name = "execution" + datetime.utcnow().isoformat("T"),
workspace=self.workspace,
run=r,
description="execution for training xgboost-synthetic")
Train your Model Locally
model = ModelServe(model_file="mockup-model.dat")
model.train()
Predict locally
(train_X, train_y), (test_X, test_y) = read_synthetic_input()
ModelServe().predict(test_X, None)
Use Kubeflow Fairing to Launch a K8s Job to train your model
- Now that you have trained a model locally, you can use Kubeflow fairing to:
  - Launch a Kubernetes job to train the model
  - Deploy the model on Kubernetes
Launching a separate Kubernetes job to train the model has the following advantages:
- You can leverage Kubernetes to run multiple training jobs in parallel
- You can run long-running jobs without blocking your kernel
Configure The Docker Registry For Kubeflow Fairing
- In order to build Docker images from your notebook, you need a Docker registry where the images will be stored
- Below you set some variables specifying a GCR container registry
- Kubeflow Fairing provides a utility function to guess the name of your GCP project
# Set up Google Container Registry (GCR) for storing output containers
# You can use any Docker container registry instead of GCR
GCP_PROJECT = fairing.cloud.gcp.guess_project_name()
DOCKER_REGISTRY = 'gcr.io/{}/fairing-job'.format(GCP_PROJECT)
Use Kubeflow fairing to build the docker image
- First you will use Kubeflow fairing's Kaniko builder to build a Docker image that includes all your dependencies
- You use Kaniko because you want to be able to run pip to install dependencies
  - Kaniko gives you the flexibility to build images from Dockerfiles
- Kaniko, however, can be slow
  - So you will build a base image using Kaniko, and then every time your code changes you will just build an image starting from your base image and adding your code to it
  - You use the Kubeflow fairing build to enable these fast rebuilds
# TODO(https://github.com/kubeflow/fairing/issues/426): We should get rid of this once the default
# Kaniko image is updated to a newer image than 0.7.0.
from kubeflow.fairing import constants
constants.constants.KANIKO_IMAGE = "gcr.io/kaniko-project/executor:v0.14.0"
from kubeflow.fairing.builders import cluster
# output_map is a map of extra files to add to the build context.
# It maps the source location to the location inside the context.
output_map = {
"Dockerfile": "Dockerfile",
"requirements.txt": "requirements.txt",
}
preprocessor = ConvertNotebookPreprocessorWithFire(class_name='ModelServe', notebook_file='build-train-deploy.ipynb',
output_map=output_map)
if not preprocessor.input_files:
preprocessor.input_files = set()
input_files=["xgboost_util.py", "mockup-model.dat"]
preprocessor.input_files = set([os.path.normpath(f) for f in input_files])
preprocessor.preprocess()
Build the base image
- You use cluster_builder to build the base image
- You only need to perform this step again if you change the Docker image or the dependencies you need to install
- ClusterBuilder takes as input the DockerImage to use as a base image
- You should use the same Jupyter image that you are using for your notebook server so that your environment will be the same when you launch Kubernetes jobs
# Use a stock jupyter image as our base image
# TODO(jlewi): Should we try to use the downward API to default to the image we are running in?
base_image = "gcr.io/kubeflow-images-public/tensorflow-1.14.0-notebook-cpu:v0.7.0"
# We use a custom Dockerfile
cluster_builder = cluster.cluster.ClusterBuilder(registry=DOCKER_REGISTRY,
base_image=base_image,
preprocessor=preprocessor,
dockerfile_path="Dockerfile",
pod_spec_mutators=[fairing.cloud.gcp.add_gcp_credentials_if_exists],
context_source=cluster.gcs_context.GCSContextSource())
cluster_builder.build()
Build the actual image
Here you use the append builder to add your code to the base image
- Calling preprocessor.preprocess() converts your notebook file to a python file
  - You are using the ConvertNotebookPreprocessorWithFire
  - This preprocessor converts ipynb files to py files by doing the following:
    - Removing all cells which don't have the comment # fairing:include-cell
    - Using python-fire to add entry points for the class specified in the constructor (see the sketch below)
  - Calling preprocess() will create the file build-train-deploy.py
- You use the AppendBuilder to rapidly build a new Docker image by quickly adding some files to an existing Docker image
  - The AppendBuilder is very fast, so it's convenient for rebuilding your images as you iterate on your code
  - The AppendBuilder will add the converted notebook, build-train-deploy.py, along with any files specified in preprocessor.input_files, to /app in the newly created image
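The exact file the preprocessor emits depends on the fairing version, but conceptually the tail of the generated build-train-deploy.py looks roughly like the hypothetical sketch below: the kept cells become module-level code, and a python-fire entry point exposes ModelServe's methods as subcommands.

# Hypothetical sketch of the tail of the generated build-train-deploy.py;
# the real generated code may differ in details.
if __name__ == "__main__":
    import fire
    # fire.Fire exposes every public method of ModelServe as a CLI subcommand,
    # e.g. `python build-train-deploy.py train`
    fire.Fire(ModelServe)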
preprocessor.preprocess()
builder = append.append.AppendBuilder(registry=DOCKER_REGISTRY,
base_image=cluster_builder.image_tag, preprocessor=preprocessor)
builder.build()
Launch the K8s Job
- You can use Kubeflow fairing to easily launch a Kubernetes job to invoke code
- You use fairing's Kubernetes job library to build a Kubernetes job
  - You use pod mutators to attach GCP credentials to the pod
  - You can also use pod mutators to attach PVCs
- Since the ConvertNotebookPreprocessorWithFire is using python-fire, you can easily invoke any method inside the ModelServe class just by configuring the command invoked by the Kubernetes job
  - In the cell below you extend the command to include train as an argument because you want to invoke the train function
Note: when you invoke train_deployer.deploy, Kubeflow fairing will stream the logs from the Kubernetes job. The job will initially show some connection errors because it tries to connect to the metadata server before it is reachable. You can ignore these errors; the job will retry until it is able to connect and then continue.
pod_spec = builder.generate_pod_spec()
train_deployer = job.job.Job(cleanup=False,
pod_spec_mutators=[
fairing.cloud.gcp.add_gcp_credentials_if_exists])
# Add command line arguments
pod_spec.containers[0].command.extend(["train"])
result = train_deployer.deploy(pod_spec)
- You can use kubectl to inspect the job that fairing created
!kubectl get jobs -l fairing-id={train_deployer.job_id} -o yaml
Deploy the trained model to Kubeflow for predictions
- Now that you have trained a model, you can use Kubeflow fairing to deploy it on Kubernetes
- When you call deployer.deploy, fairing will create a Kubernetes Deployment to serve your model
- Kubeflow fairing uses the Docker image you created earlier
  - That image contains your code and Seldon Core
- Kubeflow fairing uses Seldon to wrap your prediction code, ModelServe.predict, in REST and gRPC servers
from kubeflow.fairing.deployers import serving
pod_spec = builder.generate_pod_spec()
module_name = os.path.splitext(preprocessor.executable.name)[0]
deployer = serving.serving.Serving(module_name + ".ModelServe",
service_type="ClusterIP",
labels={"app": "mockup"})
url = deployer.deploy(pod_spec)
- You can use kubectl to inspect the deployment that fairing created
!kubectl get deploy -o yaml {deployer.deployment.metadata.name}
Send an inference request to the prediction server
(train_X, train_y), (test_X, test_y) = read_synthetic_input()
result = util.predict_nparray(url, test_X)
pprint.pprint(result.content)
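util.predict_nparray is a helper imported at the top of this notebook; under the hood a Seldon request is plain JSON. If you want to see the request explicitly, the hedged sketch below sends one by hand, assuming the endpoint follows the standard Seldon Core JSON protocol and that url is the address returned by deployer.deploy (the exact route can differ by Seldon version).

import json
import requests

# Minimal sketch of a raw Seldon-style request; assumes the standard
# Seldon Core JSON protocol with an ndarray payload. The "/predict"
# route is an assumption -- adjust it to match your deployment.
payload = {"data": {"ndarray": test_X.tolist()}}
response = requests.post(url + "/predict", json=payload)
print(json.dumps(response.json(), indent=2))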
Clean up the prediction endpoint
- Uncomment and run the commands below to delete the prediction endpoint; the resources are labeled app=mockup, matching the label passed to the Serving deployer above
# !kubectl delete service -l app=mockup
# !kubectl delete deploy -l app=mockup
Track Models and Artifacts
- Using Kubeflow's metadata server you can track models and artifacts
- The ModelServe code was instrumented to log executions and outputs
- You can access Kubeflow's metadata UI by selecting Artifact Store from the central dashboard
- See here for instructions on connecting to Kubeflow's UIs
- You can also use the python SDK to read and write entries
- This notebook illustrates a bunch of metadata functionality
Create a workspace
- Kubeflow metadata uses workspaces as a logical grouping for artifacts, executions, and datasets that belong together
- Earlier in the notebook you defined the function create_workspace to create a workspace for this example
- You can use that function to get a workspace object and then call list to see all the artifacts in that workspace
ws = create_workspace()
ws.list()
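A convenient way to browse the result is to load it into a pandas DataFrame, as in the sketch below. This assumes list() returns a list of dict-like records, which may vary across kubeflow.metadata versions.

# Sketch: browse workspace artifacts as a table. Assumes ws.list()
# returns a list of dict-like records (version-dependent).
artifacts = ws.list()
df = pd.DataFrame.from_records(artifacts)
print(df.head())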
Create a pipeline to train your model
- Kubeflow pipelines makes it easy to define complex workflows to build and deploy models
- Below you will define and run a simple one-step pipeline to train your model
- Kubeflow pipelines uses experiments to group different runs of a pipeline together
  - So you start by defining a name for your experiment
Define the pipeline
- To create a pipeline you write a function and decorate it with the @dsl.pipeline decorator
  - You use the decorator to give the pipeline a name and description
- Inside the function, each step is defined by a ContainerOp that specifies a container to invoke
- You will use the container image that you built earlier using Kubeflow Fairing
  - Since the Kubeflow Fairing preprocessor added a main function using python-fire, a step in your pipeline can invoke any function in the ModelServe class just by setting the command for the container op
- See the pipelines SDK reference for more information
@dsl.pipeline(
name='Training pipeline',
    description='A pipeline that trains an XGBoost model on synthetic data.'
)
def train_pipeline(
):
command=["python", preprocessor.executable.name, "train"]
train_op = dsl.ContainerOp(
name="train",
image=builder.image_tag,
command=command,
).apply(
gcp.use_gcp_secret('user-gcp-sa'),
)
train_op.container.working_dir = "/app"
- Pipelines need to be compiled
pipeline_func = train_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.zip'
compiler.Compiler().compile(pipeline_func, pipeline_filename)
- Kubeflow Pipelines groups runs using experiments
- So before you submit a pipeline, you need to create an experiment or pick an existing one
- Once you have compiled a pipeline, you can use the pipelines SDK to submit that pipeline
EXPERIMENT_NAME = 'MockupModel'
#Specify pipeline argument values
arguments = {}
# Get or create an experiment and submit a pipeline run
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
# The link shown in the output below leads to the run information page.
# (Note: there is a bug in JupyterLab that modifies the URL and makes the link stop working.)
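If you want the notebook to block until the run finishes, the kfp client can poll for completion. A small sketch, assuming the kfp SDK version in use exposes wait_for_run_completion with a timeout in seconds:

# Sketch: wait for the pipeline run to finish and print its final status.
# Assumes this kfp SDK version provides wait_for_run_completion.
run_detail = client.wait_for_run_completion(run_result.id, timeout=1800)
print(run_detail.run.status)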