
Address tech proofer comments
Signed-off-by: terrytangyuan <[email protected]>
terrytangyuan committed Feb 1, 2023
1 parent cac427e commit 5e71ac8
Showing 6 changed files with 10 additions and 18 deletions.
1 change: 1 addition & 0 deletions code/project/code/autoscaled-inference-service.yaml
@@ -11,5 +11,6 @@ spec:
     model:
       modelFormat:
         name: tensorflow
+      # This is only needed on Mac M1
       image: "emacski/tensorflow-serving:2.6.0"
       storageUri: "pvc://strategy-volume/saved_model_versions"
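For reference, a minimal sketch of how this model block sits inside a complete KServe InferenceService. The metadata.name, namespace, and the autoscaling fields are assumptions for illustration; only the model block is taken from this diff.

apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
  name: fashion-mnist-autoscaled   # hypothetical name
  namespace: kubeflow              # assumption
spec:
  predictor:
    scaleTarget: 1                 # assumed autoscaling settings for the "autoscaled" variant
    scaleMetric: concurrency
    model:
      modelFormat:
        name: tensorflow
      # This is only needed on Mac M1
      image: "emacski/tensorflow-serving:2.6.0"
      storageUri: "pvc://strategy-volume/saved_model_versions"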
1 change: 1 addition & 0 deletions code/project/code/inference-service.yaml
@@ -8,6 +8,7 @@ spec:
     model:
       modelFormat:
         name: tensorflow
+      # This is only needed on Mac M1
       image: "emacski/tensorflow-serving:2.6.0"
       # https://kserve.github.io/website/modelserving/storage/pvc/pvc/
       # Note that we are skipping `mountPath: /trained_model`
5 changes: 1 addition & 4 deletions code/project/code/model-selection.py
@@ -6,9 +6,6 @@
 import os
 
 
-BUFFER_SIZE = 10000
-BATCH_SIZE = 64
-
 # Scaling MNIST data from (0, 255] to (0., 1.]
 def scale(image, label):
     image = tf.cast(image, tf.float32)
@@ -21,7 +18,7 @@ def scale(image, label):
     model_path = "trained_model/saved_model_versions/" + str(i)
     model = keras.models.load_model(model_path)
     datasets, _ = tfds.load(name='fashion_mnist', with_info=True, as_supervised=True)
-    ds = datasets['test'].map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
+    ds = datasets['test'].map(scale).cache().shuffle(10000).batch(64)
     _, accuracy = model.evaluate(ds)
     if accuracy > best_accuracy:
         best_accuracy = accuracy
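Read together, the two hunks above leave the selection script looking roughly like the sketch below. The tail of scale(), the loop header, the initial best_accuracy, and the final print are assumptions filled in for illustration; they are not shown in this diff.

import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras

# Scaling MNIST data from (0, 255] to (0., 1.]
def scale(image, label):
    image = tf.cast(image, tf.float32)
    image /= 255
    return image, label

best_accuracy = 0.0
best_model_path = None
# Assumption: three exported model versions are compared; the loop header is not in the diff.
for i in range(1, 4):
    model_path = "trained_model/saved_model_versions/" + str(i)
    model = keras.models.load_model(model_path)
    datasets, _ = tfds.load(name='fashion_mnist', with_info=True, as_supervised=True)
    ds = datasets['test'].map(scale).cache().shuffle(10000).batch(64)
    _, accuracy = model.evaluate(ds)
    if accuracy > best_accuracy:
        best_accuracy = accuracy
        best_model_path = model_path

print("Best model:", best_model_path, "with accuracy:", best_accuracy)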
3 changes: 0 additions & 3 deletions code/project/code/multi-worker-distributed-training.py
@@ -23,7 +23,6 @@ def scale(image, label):
     return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE)
 
 
-# TODO: Use different models and pick top two for model serving
 def build_and_compile_cnn_model():
     print("Training CNN model")
     model = models.Sequential()
@@ -215,8 +214,6 @@ def is_chief():
 tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')
 TASK_INDEX = tf_config['task']['index']
 
-# TODO: Add flag to train different models
-
 parser = argparse.ArgumentParser()
 parser.add_argument('--saved_model_dir',
                     type=str,
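For context on the second hunk: TF_CONFIG is the JSON environment variable that the TFJob controller injects into each replica, and the surrounding code derives the task index and chief status from it. A minimal sketch of that convention, assuming the usual worker-0-as-chief layout (not copied from the file):

import json
import os

# Representative TF_CONFIG for worker 0 of a two-worker TFJob (assumed, for illustration):
# {"cluster": {"worker": ["training-worker-0:2222", "training-worker-1:2222"]},
#  "task": {"type": "worker", "index": 0}}
tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')
TASK_INDEX = tf_config['task']['index']

def is_chief():
    # Assumption: with no dedicated chief task, worker 0 acts as the chief and is
    # the only replica that should write to the shared --saved_model_dir.
    return TASK_INDEX == 0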
5 changes: 1 addition & 4 deletions code/project/code/predict-service.py
@@ -6,9 +6,6 @@
 
 model = keras.models.load_model("trained_model/saved_model_versions")
 
-BUFFER_SIZE = 10000
-BATCH_SIZE = 64
-
 # Scaling MNIST data from (0, 255] to (0., 1.]
 def scale(image, label):
     image = tf.cast(image, tf.float32)
@@ -17,7 +14,7 @@ def scale(image, label):
 
 datasets, _ = tfds.load(name='fashion_mnist', with_info=True, as_supervised=True)
 
-ds = datasets['test'].map(scale).cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
+ds = datasets['test'].map(scale).cache().shuffle(10000).batch(64)
 
 # TODO: Visualize the images and compare with the classified result
 model.predict(ds)
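The TODO above is not addressed by this commit; a minimal sketch of one way to do that visualization, assuming matplotlib is available (everything here is illustrative, not part of the repository):

import matplotlib.pyplot as plt
import numpy as np

# Fashion-MNIST class names in label order.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# Predict on a single batch so predictions line up with the plotted images.
for images, labels in ds.take(1):
    preds = model.predict(images)
    for i in range(9):
        plt.subplot(3, 3, i + 1)
        plt.imshow(np.squeeze(images[i].numpy()), cmap='gray')
        plt.title(f"pred: {class_names[int(np.argmax(preds[i]))]}\n"
                  f"true: {class_names[int(labels[i])]}", fontsize=8)
        plt.axis('off')
plt.tight_layout()
plt.show()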
13 changes: 6 additions & 7 deletions code/project/code/workflow.yaml
@@ -5,13 +5,6 @@ metadata:
   namespace: kubeflow
 spec:
   entrypoint: tfjob-wf
-  podGC:
-    strategy: OnPodSuccess
-  volumes:
-  - name: model
-    persistentVolumeClaim:
-      claimName: strategy-volume
-
   templates:
   - name: tfjob-wf
     steps:
@@ -23,6 +16,12 @@ spec:
         template: model-selection-step
     - - name: create-model-serving-service
         template: create-model-serving-service
+    podGC:
+      strategy: OnPodSuccess
+    volumes:
+    - name: model
+      persistentVolumeClaim:
+        claimName: strategy-volume
 
   - name: data-ingestion-step
     serviceAccountName: argo
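Both the volumes entry moved in this hunk and the pvc:// storage URIs in the inference services above refer to a PersistentVolumeClaim named strategy-volume. A minimal sketch of such a claim, with the access mode and size as assumptions:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: strategy-volume
  namespace: kubeflow
spec:
  accessModes:
  - ReadWriteOnce   # assumption; sharing across nodes would need ReadWriteMany
  resources:
    requests:
      storage: 1Gi  # assumption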
