-
Notifications
You must be signed in to change notification settings - Fork 46
Expand file tree
/
Copy pathDockerfile
More file actions
89 lines (67 loc) · 3.81 KB
/
Dockerfile
File metadata and controls
89 lines (67 loc) · 3.81 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
ARG GOLANG_BASE_IMG=golang:1.25.8
ARG OPERATOR_CONTROLLER_BASE_IMAGE=registry.access.redhat.com/ubi9/ubi-minimal:9.7

# ---------------------------------------------------------------------------
# Stage 1: build the manager binary and fetch kubectl
# ---------------------------------------------------------------------------
FROM ${GOLANG_BASE_IMG} AS builder
USER root
WORKDIR /opt/app-root/src

# Copy the Go Modules manifests and vendored deps first so these layers stay
# cached while the application source changes
COPY go.mod go.mod
COPY go.sum go.sum
COPY vendor vendor

# Copy the go source
COPY api api
COPY cmd cmd
COPY internal internal

# Copy Makefile
COPY Makefile Makefile

# Copy the .git directory which is needed to store the build info
COPY .git .git

# Copy the License
COPY LICENSE LICENSE

# Copy the helm charts
COPY helm-charts-k8s helm-charts-k8s

# need to decompress nfd subchart for k8s chart, in preparation for copying out CRD
# (tar -C instead of `cd` so the step doesn't depend on shell state — hadolint DL3003)
RUN tar -xvzf helm-charts-k8s/charts/node-feature-discovery-chart-0.18.3.tgz \
        -C helm-charts-k8s/charts

ARG TARGET
# Build; the repo was COPY'd in as root, so git must be told the tree is safe
RUN git config --global --add safe.directory ${PWD} && make ${TARGET}

# Fetch the latest stable kubectl for the build's target architecture
# (TARGETARCH is set automatically by BuildKit; default to amd64 for
# classic `docker build`). `-f` makes curl fail on HTTP errors instead of
# saving an error page as the "binary".
ARG TARGETARCH
RUN curl -fLO "https://dl.k8s.io/release/$(curl -fLs https://dl.k8s.io/release/stable.txt)/bin/linux/${TARGETARCH:-amd64}/kubectl" && \
    chmod +x ./kubectl
# ---------------------------------------------------------------------------
# Stage 2: minimal UBI runtime image
# ---------------------------------------------------------------------------
FROM ${OPERATOR_CONTROLLER_BASE_IMAGE}

# Install runtime packages before copying build artifacts so this slow layer
# is cached independently of the frequently-rebuilt manager binary.
# shadow-utils provides groupadd/useradd; jq is used at runtime.
RUN microdnf update -y && \
    microdnf install -y jq shadow-utils && \
    microdnf clean all

# Dedicated non-root system account with fixed numeric IDs so Kubernetes
# runAsNonRoot admission checks can verify the UID.
RUN ["groupadd", "--system", "-g", "201", "amd-gpu"]
RUN ["useradd", "--system", "-u", "201", "-g", "201", "-s", "/sbin/nologin", "amd-gpu"]

ARG TARGET
# Controller binary, kubectl, and license from the builder stage
COPY --from=builder /opt/app-root/src/${TARGET} /usr/local/bin/manager
COPY --from=builder /opt/app-root/src/kubectl /usr/local/bin/kubectl
COPY --from=builder /opt/app-root/src/LICENSE /licenses/LICENSE

# Flatten every CRD the operator installs into a single directory
COPY --from=builder /opt/app-root/src/helm-charts-k8s/crds/deviceconfig-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/crds/remediationworkflowstatus-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/node-feature-discovery/crds/nfd-api-crds.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/kmm/crds/module-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/kmm/crds/nodemodulesconfig-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/remediation-crds/crds/clusterworkflowtemplate-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/remediation-crds/crds/cronworkflow-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/remediation-crds/crds/workflowartifactgctask-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/remediation-crds/crds/workflow-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/remediation-crds/crds/workfloweventbinding-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/remediation-crds/crds/workflowtaskresult-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/remediation-crds/crds/workflowtaskset-crd.yaml \
    /opt/app-root/src/helm-charts-k8s/charts/remediation-crds/crds/workflowtemplate-crd.yaml \
    /opt/helm-charts-crds-k8s/

# Remediation assets (COPY creates /remediation automatically — no mkdir needed)
COPY --from=builder /opt/app-root/src/internal/controllers/remediation/configs /remediation/configs
COPY --from=builder /opt/app-root/src/internal/controllers/remediation/scripts /remediation/scripts

# Run as the non-root user created above
USER 201:201

LABEL name="amd-gpu-operator" \
    maintainer="yan.sun3@amd.com,shrey.ajmera@amd.com,nitish.bhat@amd.com,praveenkumar.shanmugam@amd.com" \
    vendor="Advanced Micro Devices, Inc." \
    version="dev" \
    release="dev" \
    summary="The AMD GPU Operator simplifies the management and deployment of AMD GPUs on Kubernetes clusters" \
    description="The AMD GPU Operator controller manager images are essential for managing, deploying, and orchestrating AMD GPU resources and operations within Kubernetes clusters. It streamlines various tasks, including automated driver installation and management, easy deployment of the AMD GPU device plugin, and metrics collection and export. Its operands could simplify GPU resource allocation for containers, automatically label worker nodes with GPU properties, and provide comprehensive GPU health monitoring and troubleshooting."

ENTRYPOINT ["/usr/local/bin/manager"]