 from pathlib import Path
 import logging
+from urllib.parse import urlparse
+import subprocess
+import time
 
 import redisai
+import redis
 import ml2rt
 
-from mlflow.tracking.artifact_utils import _download_artifact_from_uri
+from mlflow.deployments import BaseDeploymentClient
 from mlflow.exceptions import MlflowException
+from mlflow.tracking.artifact_utils import _download_artifact_from_uri
 from mlflow.models import Model
-from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE, RESOURCE_DOES_NOT_EXIST
-
-from . import torchscript
-import mlflow.tensorflow
-
-
-_logger = logging.getLogger(__name__)
-SUPPORTED_DEPLOYMENT_FLAVORS = [torchscript.FLAVOR_NAME, mlflow.tensorflow.FLAVOR_NAME]
-
-
-_flavor2backend = {
-    torchscript.FLAVOR_NAME: 'torch',
-    mlflow.tensorflow.FLAVOR_NAME: 'tf'}
-
-
-def _get_preferred_deployment_flavor(model_config):
-    """
-    Obtains the flavor that MLflow would prefer to use when deploying the model on RedisAI.
-    If the model does not contain any supported flavors for deployment, an exception
-    will be thrown.
-
-    :param model_config: An MLflow model object
-    :return: The name of the preferred deployment flavor for the specified model
-    """
-    # TODO: add onnx & TFlite
-    if torchscript.FLAVOR_NAME in model_config.flavors:
-        return torchscript.FLAVOR_NAME
-    elif mlflow.tensorflow.FLAVOR_NAME in model_config.flavors:
-        return mlflow.tensorflow.FLAVOR_NAME
+from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
+
+from .utils import (get_preferred_deployment_flavor, validate_deployment_flavor,
+                    SUPPORTED_DEPLOYMENT_FLAVORS, flavor2backend, Config)
+
+
+logger = logging.getLogger(__name__)
+
+
+def target_help():
+    help_string = ("\nThe mlflow-redisai plugin integrates RedisAI into the MLflow "
+                   "deployments pipeline. For a detailed explanation and several examples, "
+                   "check out the README at "
+                   "https://github.com/RedisAI/mlflow-redisai/blob/master/README.md \n\n"
+
+                   "Connection parameters: You can specify the connection parameters either "
+                   "in the target URI or as environment variables. If connection parameters "
+                   "are present in both the URI and the environment variables, the "
+                   "environment variables are ignored. A command with a fully formatted "
+                   "URI looks like\n\n"
+
+                   "    mlflow deployments <command> -t redisai:/<username>:<password>@<host>:<port>/<db>\n\n"
+
+                   "To use the default values for all parameters, specify only the target, "
+                   "as shown below\n\n"
+
+                   "    mlflow deployments <command> -t redisai\n\n"
+
+                   "If you use environment variables instead of URI parameters, the "
+                   "expected keys are\n\n"
+
+                   "    * REDIS_HOST\n"
+                   "    * REDIS_PORT\n"
+                   "    * REDIS_DB\n"
+                   "    * REDIS_USERNAME\n"
+                   "    * REDIS_PASSWORD\n\n"
+
+                   "To keep the default values, leave these environment variables unset.\n\n"
+
+                   "Model configuration: The ``--config`` (``-C``) option of the ``create`` "
+                   "and ``update`` APIs lets you pass configuration arguments specific to "
+                   "RedisAI deployments. The supported config options are\n\n"
+
+                   "    * batchsize: Batch size for auto-batching\n"
+                   "    * tag: Tag the deployment with a version number or a given name\n"
+                   "    * device: CPU or GPU; to target a specific GPU, include the device "
+                   "index (e.g. GPU:0)\n\n")
+    return help_string
+
+
+def run_local(name, model_uri, flavor=None, config=None):
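+    """Start a RedisAI server in a local Docker container and deploy the model to it.
+
+    Note: this assumes the ``docker`` CLI is available on PATH; it runs the
+    ``redisai/redisai:latest`` image, adding ``--gpus all`` when ``device``
+    asks for GPU.
+    """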
+    config = config or {}
+    device = config.get('device', 'cpu')
+    if 'gpu' in device.lower():
+        commands = ['docker', 'run', '-p', '6379:6379', '--gpus', 'all', '--rm', 'redisai/redisai:latest']
     else:
-        raise MlflowException(
-            message=(
-                "The specified model does not contain any of the supported flavors for"
-                " deployment. The model contains the following flavors: {model_flavors}."
-                " Supported flavors: {supported_flavors}".format(
-                    model_flavors=model_config.flavors.keys(),
-                    supported_flavors=SUPPORTED_DEPLOYMENT_FLAVORS)),
-            error_code=RESOURCE_DOES_NOT_EXIST)
-
-
-def _validate_deployment_flavor(model_config, flavor):
-    """
-    Checks that the specified flavor is a supported deployment flavor
-    and is contained in the specified model. If one of these conditions
-    is not met, an exception is thrown.
-
-    :param model_config: An MLflow Model object
-    :param flavor: The deployment flavor to validate
-    """
-    if flavor not in SUPPORTED_DEPLOYMENT_FLAVORS:
-        raise MlflowException(
-            message=(
-                "The specified flavor: `{flavor_name}` is not supported for deployment."
-                " Please use one of the supported flavors: {supported_flavor_names}".format(
-                    flavor_name=flavor,
-                    supported_flavor_names=SUPPORTED_DEPLOYMENT_FLAVORS)),
-            error_code=INVALID_PARAMETER_VALUE)
-    elif flavor not in model_config.flavors:
-        raise MlflowException(
-            message=("The specified model does not contain the specified deployment flavor:"
-                     " `{flavor_name}`. Please use one of the following deployment flavors"
-                     " that the model contains: {model_flavors}".format(
-                         flavor_name=flavor, model_flavors=model_config.flavors.keys())),
-            error_code=RESOURCE_DOES_NOT_EXIST)
-
-
-def deploy(model_key, model_uri, flavor=None, device='cpu', **kwargs):
-    """
-    Deploy an MLFlow model to RedisAI. User needs to pass the URL and credentials
-    to connect to RedisAI server. Currently it accepts only TorchScript model, freezed
-    Tensorflow model and SavedModel from tensorflow through MLFlow although RedisAI
-    can takes Tensorflow lite model, ONNX model (any models like scikit-learn, spark
-    which is converted to ONNX).
-
-    Note: ml2rt is a package we have developed which can
-      - do the conversion from different frameworks to ONNX
-      - load SavedModel, freezed tensorflow, torchscript or ONNX models from disk
-      - load script
-
-    :param model_key: Redis Key on which we deploy the model
-    :param model_uri: The location, in URI format, of the MLflow model to deploy to RedisAI.
-                      For example:
-
-                      - ``/Users/me/path/to/local/model``
-                      - ``relative/path/to/local/model``
-                      - ``s3://my_bucket/path/to/model``
-                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
-                      - ``models:/<model_name>/<model_version>``
-                      - ``models:/<model_name>/<stage>``
-
-                      For more information about supported URI schemes, see
-                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
-                      artifact-locations>`_.
-    :param flavor: The name of the flavor of the model to use for deployment. Must be either
-                   ``None`` or one of mlflow_redisai.pytorch.SUPPORTED_DEPLOYMENT_FLAVORS.
-                   If ``None``, a flavor is automatically selected from the model's available
-                   flavors. If the specified flavor is not present or not supported for deployment,
-                   an exception will be thrown.
-    :param device: GPU or CPU
-    :param kwargs: Parameters for RedisAI connection
-
-    """
-    model_path = _download_artifact_from_uri(model_uri)
-    # TODO: use os.path for python2.x compatiblity
-    path = Path(model_path)
-    model_config = path/'MLmodel'
-    if not model_config.exists():
-        raise MlflowException(
-            message=(
-                "Failed to find MLmodel configuration within the specified model's"
-                " root directory."),
-            error_code=INVALID_PARAMETER_VALUE)
-    model_config = Model.load(model_config)
-
-    if flavor is None:
-        flavor = _get_preferred_deployment_flavor(model_config)
-    else:
-        _validate_deployment_flavor(model_config, flavor)
-    _logger.info("Using the %s flavor for deployment!", flavor)
-
-    con = redisai.Client(**kwargs)
-    if flavor == mlflow.tensorflow.FLAVOR_NAME:
-        tags = model_config.flavors[flavor]['meta_graph_tags']
-        signaturedef = model_config.flavors[flavor]['signature_def_key']
-        model_dir = path/model_config.flavors[flavor]['saved_model_dir']
-        model, inputs, outputs = ml2rt.load_model(model_dir, tags, signaturedef)
-    else:
-        # TODO: this assumes the torchscript is saved using mlflow-redisai
-        model_path = list(path.joinpath('data').iterdir())[0]
-        if model_path.suffix != '.pt':
-            raise RuntimeError("Model file does not have a valid suffix. Expected .pt")
-        model = ml2rt.load_model(model_path)
-        inputs = outputs = None
-    try:
-        device = redisai.Device.__members__[device]
-    except KeyError:
-        raise MlflowException(
-            message="Invalid value for ``device``. It only accepts ``cpu`` or ``gpu``",
-            error_code=INVALID_PARAMETER_VALUE)
-    try:
-        backend = _flavor2backend[flavor]
-    except KeyError:
-        raise MlflowException(
-            message="Invalid value for ``backend``. It only accepts one of {}".format(
-                _flavor2backend.keys()
-            ),
-            error_code=INVALID_PARAMETER_VALUE)
-    backend = redisai.Backend.__members__[backend]
-    con.modelset(model_key, backend, device, model, inputs=inputs, outputs=outputs)
-
-
-def delete(model_key, **kwargs):
-    """
-    Delete a RedisAI model key and value.
-
-    :param model_key: Redis Key on which we deploy the model
-    """
-    con = redisai.Client(**kwargs)
-    con.modeldel(model_key)
-    _logger.info("Deleted model with key: %s", model_key)
-
+        commands = ['docker', 'run', '-p', '6379:6379', '--rm', 'redisai/redisai:latest']
+    proc = subprocess.Popen(commands)
+    plugin = RedisAIPlugin('redisai:/localhost:6379/0')
+    start_time = time.time()
+    prev_num_interval = 0
+    logger.info("Launching RedisAI docker container")
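+    # Poll the server with PING until it responds. Roughly every 10 seconds,
+    # check whether the docker process has already exited (communicate()
+    # returning instead of timing out means it died), i.e. startup failed.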
+    while True:
+        try:
+            if plugin.con.ping():
+                break
+        except redis.exceptions.ConnectionError:
+            num_interval, _ = divmod(time.time() - start_time, 10)
+            if num_interval > prev_num_interval:
+                prev_num_interval = num_interval
+                try:
+                    proc.communicate(timeout=0.1)
+                except subprocess.TimeoutExpired:
+                    pass
+                else:
+                    raise RuntimeError("Could not start the RedisAI docker container. You can "
+                                       "try setting up RedisAI locally by following the "
+                                       "documentation (https://oss.redislabs.com/redisai/quickstart/)"
+                                       " and call the ``create`` API with the target ``redisai``, "
+                                       "as in the example command below\n\n"
+                                       "    mlflow deployments create -t redisai -m <modeluri> ...\n\n")
+            time.sleep(0.2)
+    plugin.create_deployment(name, model_uri, flavor, config)
+
+
+class RedisAIPlugin(BaseDeploymentClient):
+    def __init__(self, uri):
+        super().__init__(uri)
+        server_config = Config()
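+        # The target URI looks like ``redisai:/host:port/db``; swapping the
+        # scheme for ``redis`` yields a URL that ``redisai.Client.from_url``
+        # (redis-py style) can parse. A bare ``redisai`` target has an empty
+        # path, so connection parameters come from the environment via Config.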
+        path = urlparse(uri).path
+        if path:
+            uri = f"redis:/{path}"
+            self.con = redisai.Client.from_url(uri)
+        else:
+            self.con = redisai.Client(**server_config)
+
+    def create_deployment(self, name, model_uri, flavor=None, config=None):
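+        """Download the model artifacts from ``model_uri``, resolve the
+        deployment flavor, and load the model into RedisAI under the key
+        ``name``."""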
+        config = config or {}
+        device = config.get('device', 'CPU')
+        autobatch_size = config.get('batchsize')
+        tag = config.get('tag')
+        path = Path(_download_artifact_from_uri(model_uri))
+        model_config = path / 'MLmodel'
+        if not model_config.exists():
+            raise MlflowException(
+                message=(
+                    "Failed to find MLmodel configuration within the specified model's"
+                    " root directory."),
+                error_code=INVALID_PARAMETER_VALUE)
+        model_config = Model.load(model_config)
+
+        if flavor is None:
+            flavor = get_preferred_deployment_flavor(model_config)
+        else:
+            validate_deployment_flavor(model_config, flavor)
+        logger.info("Using the %s flavor for deployment!", flavor)
+
+        if flavor == 'tensorflow':
+            # TODO: test this for tf1.x and tf2.x
+            tags = model_config.flavors[flavor]['meta_graph_tags']
+            signaturedef = model_config.flavors[flavor]['signature_def_key']
+            model_dir = path / model_config.flavors[flavor]['saved_model_dir']
+            model, inputs, outputs = ml2rt.load_model(model_dir, tags, signaturedef)
+        else:
+            model_path = None
+            for file in path.iterdir():
+                if file.suffix == '.pt':
+                    model_path = file
+            if model_path is None:
+                raise RuntimeError("Could not find a model file with a ``.pt`` suffix "
+                                   "in the model directory")
+            model = ml2rt.load_model(model_path)
+            inputs = outputs = None
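+        # flavor2backend maps the MLflow flavor to RedisAI's backend identifier
+        # ('torch' for torchscript, 'tf' for tensorflow).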
+        backend = flavor2backend[flavor]
+        self.con.modelset(name, backend, device, model,
+                          inputs=inputs, outputs=outputs, batch=autobatch_size, tag=tag)
+        return {'name': name, 'flavor': flavor}
+
+    def delete_deployment(self, name):
+        """
+        Delete a RedisAI model key and value.
+
+        :param name: Redis key under which the model was deployed
+        """
+        self.con.modeldel(name)
+        logger.info("Deleted model with key: %s", name)
+
+    def update_deployment(self, name, model_uri=None, flavor=None, config=None):
+        try:
+            self.con.modelget(name, meta_only=True)
+        except redis.exceptions.ResponseError:
+            raise MlflowException("Model doesn't exist. If you are trying to create a new "
+                                  "deployment, use ``create_deployment``")
+        else:
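+            # RedisAI's MODELSET overwrites an existing key, so an update is a
+            # re-deployment under the same name.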
+            ret = self.create_deployment(name, model_uri, flavor, config=config)
+            return {'flavor': ret['flavor']}
+
+    def list_deployments(self, **kwargs):
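+        # modelscan (the experimental AI._MODELSCAN command) returns every
+        # model key loaded into RedisAI, along with its tag.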
+        return self.con.modelscan()
+
+    def get_deployment(self, name):
+        return self.con.modelget(name, meta_only=True)
+
+    def predict(self, deployment_name, df):
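+        # Single-input/single-output round trip: write the dataframe values to
+        # Redis as a tensor, run the model on it, then read the output tensor back.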
+        nparray = df.to_numpy()
+        self.con.tensorset('array', nparray)
+        # TODO: manage multiple inputs and multiple outputs
+        self.con.modelrun(deployment_name, inputs=['array'], outputs=['output'])
+        return self.con.tensorget('output')