|
# Optional configuration, read from the environment; each defaults to the
# empty string when unset so later `[ -n ... ]` checks work under `set -u`.
# Local source checkouts to install in editable mode (if non-empty):
LLAMA_MODELS_DIR=${LLAMA_MODELS_DIR:-}
LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
LLAMA_STACK_CLIENT_DIR=${LLAMA_STACK_CLIENT_DIR:-}

# PyPI/test-PyPI version selection and target build platform (if non-empty):
TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
PYPI_VERSION=${PYPI_VERSION:-}
BUILD_PLATFORM=${BUILD_PLATFORM:-}
|
# Paths inside the container where the local source trees are made
# available — either COPY'd into the image or bind-mounted at run time.
stack_mount="/app/llama-stack-source"
models_mount="/app/llama-models-source"
client_mount="/app/llama-stack-client-source"
|
#######################################
# Install a local source checkout into the container in editable mode, so
# development changes are reflected without rebuilding the package.
# Globals:
#   RED, NC              (read) terminal color codes for error output
#   USE_COPY_NOT_MOUNT   (read) "true" => bake the source into the image
# Arguments:
#   $1 - host path to the package source directory
#   $2 - mount point for the source inside the container
#   $3 - name of the env var that supplied $1 (used in the error message)
# Outputs:
#   Emits Dockerfile instructions via add_to_container.
# Exits:
#   1 if $1 is not an existing directory.
#######################################
install_local_package() {
  local dir="$1"
  local mount_point="$2"
  local name="$3"

  if [ ! -d "$dir" ]; then
    # Fatal, not a warning: the caller explicitly requested a local install,
    # so a missing directory means the build cannot proceed as asked.
    echo "${RED}Error: $name is set but directory does not exist: $dir${NC}" >&2
    exit 1
  fi

  # With USE_COPY_NOT_MOUNT=true the source is copied into the image;
  # otherwise the caller is expected to bind-mount it at $mount_point.
  if [ "$USE_COPY_NOT_MOUNT" = "true" ]; then
    add_to_container << EOF
COPY $dir $mount_point
EOF
  fi
  add_to_container << EOF
RUN uv pip install --no-cache -e $mount_point
EOF
}
125 | 132 |
|
# Editable installs from local checkouts, when the corresponding env var
# points at a source tree. (LLAMA_STACK_DIR is handled separately below,
# since it falls back to a PyPI install when unset.)
if [ -n "${LLAMA_MODELS_DIR}" ]; then
  install_local_package "${LLAMA_MODELS_DIR}" "${models_mount}" "LLAMA_MODELS_DIR"
fi

if [ -n "${LLAMA_STACK_CLIENT_DIR}" ]; then
  install_local_package "${LLAMA_STACK_CLIENT_DIR}" "${client_mount}" "LLAMA_STACK_CLIENT_DIR"
fi
141 | 141 |
|
142 | | - add_to_container << EOF |
143 | | -RUN uv pip install --no-cache -e $stack_mount |
144 | | -EOF |
| 142 | +if [ -n "$LLAMA_STACK_DIR" ]; then |
| 143 | + install_local_package "$LLAMA_STACK_DIR" "$stack_mount" "LLAMA_STACK_DIR" |
145 | 144 | else |
146 | 145 | if [ -n "$TEST_PYPI_VERSION" ]; then |
147 | 146 | # these packages are damaged in test-pypi, so install them first |
|
0 commit comments