diff --git a/.github/workflows/ci_release.yml b/.github/workflows/ci_release.yml new file mode 100644 index 0000000000..1107efcb65 --- /dev/null +++ b/.github/workflows/ci_release.yml @@ -0,0 +1,205 @@ +name: JuiceFS CSI Release +on: + push: + branches: + - master + - release-* + +jobs: + release: + runs-on: ubuntu-latest + outputs: + version: ${{ steps.version.outputs.version }} + steps: + - name: "Checkout Code" + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Determine version + id: version + run: | + # Get the latest tag that matches v* pattern + LATEST_TAG=$(git describe --tags --match 'v*' --abbrev=0 2>/dev/null || echo "v0.0.0") + + # Extract major, minor, patch + VERSION=${LATEST_TAG#v} + MAJOR=$(echo $VERSION | cut -d. -f1) + MINOR=$(echo $VERSION | cut -d. -f2) + PATCH=$(echo $VERSION | cut -d. -f3) + + # Increment patch version + PATCH=$((PATCH + 1)) + NEXT_VERSION="v${MAJOR}.${MINOR}.${PATCH}" + + echo "version=${NEXT_VERSION}" >> $GITHUB_OUTPUT + echo "Next version: ${NEXT_VERSION}" + + - name: Create tag + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git tag -a ${{ steps.version.outputs.version }} -m "Release ${{ steps.version.outputs.version }}" + git push origin ${{ steps.version.outputs.version }} + + build-csi: + needs: release + runs-on: ubuntu-latest + strategy: + matrix: + region: + - name: us-east-1 + identifier: virginia + - name: eu-west-2 + identifier: london + steps: + - name: "Checkout" + uses: actions/checkout@v4 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ matrix.region.name }} + + - name: Login to ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - uses: depot/setup-action@v1 + + - name: Build and Push CSI Driver + uses: depot/build-push-action@v1 + with: + platforms: linux/amd64,linux/arm64 + context: . + file: docker/csi.Dockerfile + build-contexts: | + project=. 
+ ui=./dashboard-ui-v2 + build-args: | + TARGETARCH=amd64 + JFSCHAN=stable + JUICEFS_CE_MOUNT_IMAGE=juicedata/mount:ce-latest + JUICEFS_EE_MOUNT_IMAGE=juicedata/mount:ee-5.2.8-1305e8c + provenance: false + token: ${{ secrets.DEPOT_TOKEN }} + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/juicefs-csi-driver-${{ matrix.region.identifier }}:${{ needs.release.outputs.version }} + ${{ steps.login-ecr.outputs.registry }}/juicefs-csi-driver-${{ matrix.region.identifier }}:latest + + build-mount-images: + needs: release + runs-on: ubuntu-latest + strategy: + matrix: + region: + - name: us-east-1 + identifier: virginia + - name: eu-west-2 + identifier: london + edition: + - name: ce + dockerfile: ce.juicefs.Dockerfile + build-args: | + CEJUICEFS_VERSION=latest + - name: ee + dockerfile: ee.juicefs.Dockerfile + build-args: | + EEJUICEFS_VERSION=5.2.8-1305e8c + steps: + - name: "Checkout" + uses: actions/checkout@v4 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ matrix.region.name }} + + - name: Login to ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - uses: depot/setup-action@v1 + + - name: Build and Push Mount Image (${{ matrix.edition.name }}) + uses: depot/build-push-action@v1 + with: + platforms: linux/amd64,linux/arm64 + context: docker + file: docker/${{ matrix.edition.dockerfile }} + build-args: ${{ matrix.edition.build-args }} + provenance: false + token: ${{ secrets.DEPOT_TOKEN }} + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/juicefs-mount-${{ matrix.edition.name }}-${{ matrix.region.identifier }}:${{ needs.release.outputs.version }} + ${{ steps.login-ecr.outputs.registry }}/juicefs-mount-${{ matrix.edition.name }}-${{ matrix.region.identifier }}:latest + + build-dashboard: + needs: release + runs-on: ubuntu-latest + strategy: + matrix: + region: + - name: us-east-1 + identifier: virginia + - name: eu-west-2 + identifier: london + steps: + - name: "Checkout" + uses: actions/checkout@v4 + + - uses: pnpm/action-setup@v4 + name: Install pnpm + with: + version: 9 + run_install: | + cwd: dashboard-ui-v2 + + - name: Build dashboard + run: make dashboard-dist + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ matrix.region.name }} + + - name: Login to ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - uses: depot/setup-action@v1 + + - name: Build and Push Dashboard + uses: depot/build-push-action@v1 + with: + platforms: linux/amd64,linux/arm64 + context: . + file: docker/dashboard.Dockerfile + build-contexts: | + project=. + ui=./dashboard-ui-v2 + provenance: false + token: ${{ secrets.DEPOT_TOKEN }} + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/juicefs-csi-dashboard-${{ matrix.region.identifier }}:${{ needs.release.outputs.version }} + ${{ steps.login-ecr.outputs.registry }}/juicefs-csi-dashboard-${{ matrix.region.identifier }}:latest + + notify: + needs: [release, build-csi, build-mount-images, build-dashboard] + runs-on: ubuntu-latest + if: always() && needs.build-csi.result == 'success' && needs.build-mount-images.result == 'success' && needs.build-dashboard.result == 'success' + steps: + - name: Notify Slack! 
+ uses: someimportantcompany/github-actions-slack-message@v1 + with: + webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }} + text: ":rocket: JuiceFS CSI Driver ${{ needs.release.outputs.version }} images built and pushed to all ECR registries! :rocket:" \ No newline at end of file diff --git a/.github/workflows/ci_update_dev.yml b/.github/workflows/ci_update_dev.yml new file mode 100644 index 0000000000..2971e6fa41 --- /dev/null +++ b/.github/workflows/ci_update_dev.yml @@ -0,0 +1,151 @@ +name: JuiceFS CSI Dev CI/CD +on: + pull_request: + branches: + - master + - release-* + push: + branches: + - development + - dev + +jobs: + build-csi: + runs-on: ubuntu-latest + steps: + - name: "Checkout" + uses: actions/checkout@v4 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + - name: Login to ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - uses: depot/setup-action@v1 + + - name: Build and Push CSI Driver + uses: depot/build-push-action@v1 + with: + platforms: linux/amd64,linux/arm64 + context: . + file: docker/csi.Dockerfile + build-contexts: | + project=. + ui=./dashboard-ui-v2 + build-args: | + TARGETARCH=amd64 + JFSCHAN=stable + JUICEFS_CE_MOUNT_IMAGE=juicedata/mount:ce-latest + JUICEFS_EE_MOUNT_IMAGE=juicedata/mount:ee-5.2.8-1305e8c + provenance: false + token: ${{ secrets.DEPOT_TOKEN }} + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/juicefs-csi-driver-dev-virginia:dev + ${{ steps.login-ecr.outputs.registry }}/juicefs-csi-driver-dev-virginia:pr-${{ github.event.pull_request.number || 'push' }} + + build-mount-images: + runs-on: ubuntu-latest + strategy: + matrix: + edition: + - name: ce + dockerfile: ce.juicefs.Dockerfile + build-args: | + CEJUICEFS_VERSION=latest + - name: ee + dockerfile: ee.juicefs.Dockerfile + build-args: | + EEJUICEFS_VERSION=5.2.8-1305e8c + steps: + - name: "Checkout" + uses: actions/checkout@v4 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + - name: Login to ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - uses: depot/setup-action@v1 + + - name: Build and Push Mount Image (${{ matrix.edition.name }}) + uses: depot/build-push-action@v1 + with: + platforms: linux/amd64,linux/arm64 + context: docker + file: docker/${{ matrix.edition.dockerfile }} + build-args: ${{ matrix.edition.build-args }} + provenance: false + token: ${{ secrets.DEPOT_TOKEN }} + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/juicefs-mount-${{ matrix.edition.name }}-dev-virginia:dev + ${{ steps.login-ecr.outputs.registry }}/juicefs-mount-${{ matrix.edition.name }}-dev-virginia:pr-${{ github.event.pull_request.number || 'push' }} + + build-dashboard: + runs-on: ubuntu-latest + steps: + - name: "Checkout" + uses: actions/checkout@v4 + + - uses: pnpm/action-setup@v4 + name: Install pnpm + with: + version: 9 + run_install: | + cwd: dashboard-ui-v2 + + - name: Build dashboard + run: make dashboard-dist + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + + - name: Login to ECR + id: login-ecr 
+ uses: aws-actions/amazon-ecr-login@v2 + + - uses: depot/setup-action@v1 + + - name: Build and Push Dashboard + uses: depot/build-push-action@v1 + with: + platforms: linux/amd64,linux/arm64 + context: . + file: docker/dashboard.Dockerfile + build-contexts: | + project=. + ui=./dashboard-ui-v2 + provenance: false + token: ${{ secrets.DEPOT_TOKEN }} + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/juicefs-csi-dashboard-dev-virginia:dev + ${{ steps.login-ecr.outputs.registry }}/juicefs-csi-dashboard-dev-virginia:pr-${{ github.event.pull_request.number || 'push' }} + + notify: + needs: [build-csi, build-mount-images, build-dashboard] + runs-on: ubuntu-latest + if: always() && needs.build-csi.result == 'success' && needs.build-mount-images.result == 'success' && needs.build-dashboard.result == 'success' + steps: + - name: Notify Slack! + uses: someimportantcompany/github-actions-slack-message@v1 + with: + webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }} + text: ":construction: JuiceFS CSI Driver dev images built and pushed to ECR! :construction:" \ No newline at end of file diff --git a/.gitignore b/.gitignore index a4f09580c7..83f91d06e4 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,4 @@ cov2.out yalc.lock .ropeproject dist/ +*.test diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc index 4b6210e0e9..bc41882c24 100644 --- a/.markdownlint-cli2.jsonc +++ b/.markdownlint-cli2.jsonc @@ -47,6 +47,7 @@ }, "link-fragments": false, "no-trailing-slash-in-links": true, + "MD029": false, "enhanced-proper-names": { "code_blocks": false, "html_elements": false, diff --git a/deploy/kubernetes/csi-daemonset-mount/daemonset.yaml b/deploy/kubernetes/csi-daemonset-mount/daemonset.yaml new file mode 100644 index 0000000000..486a1b35b5 --- /dev/null +++ b/deploy/kubernetes/csi-daemonset-mount/daemonset.yaml @@ -0,0 +1,70 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: juicefs-csi-node + namespace: kube-system +spec: + selector: + matchLabels: + app: juicefs-csi-node + template: + metadata: + labels: + app: juicefs-csi-node + spec: + containers: + - name: juicefs-plugin + image: juicedata/juicefs-csi-driver:latest + args: + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --nodeid=$(NODE_NAME) + - --v=1 + - --enable-manager=true + env: + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: JUICEFS_MOUNT_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: JUICEFS_MOUNT_PATH + value: /var/lib/juicefs/volume + - name: JUICEFS_CONFIG_PATH + value: /var/lib/juicefs/config + # Enable StorageClass mount sharing + - name: STORAGE_CLASS_SHARE_MOUNT + value: "true" + # Enable DaemonSet deployment for shared mounts + - name: STORAGE_CLASS_DAEMONSET + value: "true" + volumeMounts: + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + - name: plugin-dir + mountPath: /csi + - name: device-dir + mountPath: /dev + mountPropagation: HostToContainer + volumes: + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.juicefs.com + type: DirectoryOrCreate + - name: device-dir + hostPath: + path: /dev + type: Directory \ No newline at end of file diff --git a/deploy/kubernetes/csi-daemonset-mount/mount-config.yaml b/deploy/kubernetes/csi-daemonset-mount/mount-config.yaml new file mode 100644 
index 0000000000..baeca87860 --- /dev/null +++ b/deploy/kubernetes/csi-daemonset-mount/mount-config.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + # Default configuration for all StorageClasses + default: | + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: DoesNotExist + + # StorageClass "juicefs-sc" configuration + juicefs-sc: | + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: juicefs/mount-node + operator: In + values: + - "true" diff --git a/deploy/kubernetes/csi-daemonset-mount/storageclass-example.yaml b/deploy/kubernetes/csi-daemonset-mount/storageclass-example.yaml new file mode 100644 index 0000000000..7e3060a87e --- /dev/null +++ b/deploy/kubernetes/csi-daemonset-mount/storageclass-example.yaml @@ -0,0 +1,49 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: juicefs-sc-daemonset +provisioner: csi.juicefs.com +volumeBindingMode: Immediate +parameters: + csi.storage.k8s.io/provisioner-secret-name: juicefs-secret + csi.storage.k8s.io/provisioner-secret-namespace: kube-system + csi.storage.k8s.io/node-publish-secret-name: juicefs-secret + csi.storage.k8s.io/node-publish-secret-namespace: kube-system + + # JuiceFS specific parameters + pathPattern: "${pvc.name}" + + # Node affinity configuration for DaemonSet mount pods + # This example will deploy mount pods only on nodes with specific labels + nodeAffinity: | + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: juicefs/mount-node + operator: In + values: + - "true" + - key: node-role.kubernetes.io/worker + operator: Exists + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: juicefs/high-performance + operator: In + values: + - "true" +--- +apiVersion: v1 +kind: Secret +metadata: + name: juicefs-secret + namespace: kube-system +type: Opaque +stringData: + name: "test-volume" + metaurl: "redis://redis-service:6379/1" + storage: "s3" + bucket: "https://mybucket.s3.us-west-2.amazonaws.com" + access-key: "ACCESS_KEY" + secret-key: "SECRET_KEY" \ No newline at end of file diff --git a/deploy/kubernetes/mount-config/configmap-examples.yaml b/deploy/kubernetes/mount-config/configmap-examples.yaml new file mode 100644 index 0000000000..18892da912 --- /dev/null +++ b/deploy/kubernetes/mount-config/configmap-examples.yaml @@ -0,0 +1,131 @@ +# Example 1: Default configuration with mixed modes +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + # Default configuration for all StorageClasses + default: | + mode: shared-pod # Use shared mount pods by default + + # High-performance StorageClass uses DaemonSet on specific nodes + high-performance-sc: | + mode: daemonset + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node.kubernetes.io/instance-type + operator: In + values: + - m5.xlarge + - m5.2xlarge + - m5.4xlarge + + # Development StorageClass uses per-PVC pods for isolation + development-sc: | + mode: per-pvc + + # Production StorageClass uses DaemonSet with node affinity + production-sc: | + mode: daemonset + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: 
juicefs/mount-node + operator: In + values: + - "true" + - key: node-role.kubernetes.io/control-plane + operator: DoesNotExist + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: juicefs/high-performance + operator: In + values: + - "true" + +--- +# Example 2: Simple configuration - all StorageClasses use shared pods +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + default: | + mode: shared-pod + +--- +# Example 3: DaemonSet-only configuration with different node affinities +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + # Default: DaemonSet on all worker nodes + default: | + mode: daemonset + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + + # GPU workloads: DaemonSet only on GPU nodes + gpu-storage: | + mode: daemonset + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: nvidia.com/gpu + operator: Exists + + # Database storage: DaemonSet on dedicated database nodes + database-storage: | + mode: daemonset + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: workload-type + operator: In + values: + - database + +--- +# Example 4: Migration scenario - gradually move from per-PVC to shared/DaemonSet +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + # Default: Keep existing per-PVC behavior + default: | + mode: per-pvc + + # New StorageClasses use shared pods + juicefs-sc-v2: | + mode: shared-pod + + # High-traffic StorageClass uses DaemonSet + juicefs-sc-high-traffic: | + mode: daemonset + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + preference: + matchExpressions: + - key: node.kubernetes.io/instance-type + operator: In + values: + - m5.large + - m5.xlarge \ No newline at end of file diff --git a/docs/en/guide/daemonset-mount.md b/docs/en/guide/daemonset-mount.md new file mode 100644 index 0000000000..75527cf3e3 --- /dev/null +++ b/docs/en/guide/daemonset-mount.md @@ -0,0 +1,214 @@ +# DaemonSet Mount for StorageClass + +This feature allows JuiceFS CSI Driver to deploy Mount Pods as DaemonSets instead of individual Pods when using StorageClass with mount sharing enabled. This provides better resource management and control over which nodes run Mount Pods. + +## Overview + +When `STORAGE_CLASS_SHARE_MOUNT` is enabled, JuiceFS CSI Driver shares Mount Pods across multiple PVCs that use the same StorageClass. By default, these are created as individual Pods. With the DaemonSet option, Mount Pods are deployed as DaemonSets, providing: + +- **Better resource control**: DaemonSets ensure one Mount Pod per selected node +- **Node affinity support**: Control which nodes run Mount Pods using nodeAffinity +- **Automatic lifecycle management**: DaemonSets handle Pod creation/deletion automatically +- **Simplified operations**: Easier to manage and monitor Mount Pods +- **Works with existing StorageClasses**: No need to modify or recreate StorageClasses + +## Configuration + +### Enable DaemonSet Mount + +DaemonSet mount is automatically available when mount sharing is enabled. 
To enable mount sharing, set this environment variable in the CSI Driver deployment: + +```yaml +env: + - name: STORAGE_CLASS_SHARE_MOUNT + value: "true" +``` + +Once enabled, you can configure specific StorageClasses to use DaemonSet mode via the ConfigMap (see below). + +### Configure Node Affinity + +There are two ways to configure node affinity for DaemonSet Mount Pods: + +#### Method 1: ConfigMap (Recommended for existing StorageClasses) + +Create a ConfigMap to define node affinity for your StorageClasses without modifying them: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + # Default configuration for all StorageClasses + default: | + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: DoesNotExist + + # Configuration for specific StorageClass by name + my-existing-storageclass: | + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: juicefs/mount-node + operator: In + values: + - "true" +``` + +This method works with existing StorageClasses without any modifications. + +#### Method 2: StorageClass Parameters (For new StorageClasses) + +For new StorageClasses, you can specify `nodeAffinity` directly in the parameters: + +```yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: juicefs-sc-daemonset +provisioner: csi.juicefs.com +parameters: + # ... other parameters ... + + # Node affinity configuration for DaemonSet Mount Pods + nodeAffinity: | + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: juicefs/mount-node + operator: In + values: + - "true" +``` + +## How It Works + +1. When a PVC is created using a StorageClass with DaemonSet mount enabled: + - The CSI Driver checks if a DaemonSet for this StorageClass already exists + - If not, it looks for node affinity configuration: + - First checks the ConfigMap for StorageClass-specific or default configuration + - Falls back to StorageClass parameters if specified + - Creates a new DaemonSet with the configured node affinity + - If DaemonSet exists, it adds a reference to the existing DaemonSet + +2. The DaemonSet ensures Mount Pods are running on selected nodes: + - Pods are automatically created on nodes matching the affinity rules + - Mount paths are shared across PVCs using the same StorageClass + +3. When a PVC is deleted: + - The reference is removed from the DaemonSet + - If no references remain, the DaemonSet is deleted + +## Priority Order + +The system checks for node affinity configuration in this order: + +1. **StorageClass parameters** (if `nodeAffinity` is specified) +2. **ConfigMap with StorageClass name** as key +3. **ConfigMap default** configuration +4. **No affinity** (DaemonSet runs on all nodes) + +## Use Cases + +### Dedicated Mount Nodes + +Label specific nodes for running Mount Pods: + +```bash +kubectl label nodes node1 node2 node3 juicefs/mount-node=true +``` + +Then use nodeAffinity in StorageClass to target these nodes. 
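+For example, a matching `nodeAffinity` rule for these labeled nodes (the same snippet used in the Configuration section above, assuming the `juicefs/mount-node=true` label) might look like:
+
+```yaml
+nodeAffinity: |
+  requiredDuringSchedulingIgnoredDuringExecution:
+    nodeSelectorTerms:
+      - matchExpressions:
+          - key: juicefs/mount-node
+            operator: In
+            values:
+              - "true"
+```
+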
+ +### High-Performance Nodes + +Prefer nodes with better resources for Mount Pods: + +```yaml +nodeAffinity: | + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: node.kubernetes.io/instance-type + operator: In + values: + - m5.xlarge + - m5.2xlarge +``` + +### Exclude Control Plane + +Prevent Mount Pods from running on control plane nodes: + +```yaml +nodeAffinity: | + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: DoesNotExist +``` + +## Monitoring + +You can monitor DaemonSet Mount Pods using standard Kubernetes commands: + +```bash +# List all mount DaemonSets +kubectl get daemonset -n kube-system | grep juicefs + +# Check DaemonSet status +kubectl describe daemonset juicefs--mount-ds -n kube-system + +# List pods created by DaemonSet +kubectl get pods -n kube-system -l juicefs.com/mount-by=juicefs-csi-driver +``` + +## Limitations + +- Node affinity is only applied when `STORAGE_CLASS_SHARE_MOUNT` is enabled and DaemonSet mode is configured +- All PVCs using the same StorageClass share the same DaemonSet and node affinity rules +- Changing node affinity requires recreating the DaemonSet (happens automatically when all PVCs are deleted) + +## Migration + +To migrate from Pod-based mounts to DaemonSet mounts: + +1. Enable the feature flags in CSI Driver +2. Create a new StorageClass with desired node affinity +3. Migrate PVCs to the new StorageClass +4. Old Mount Pods will be replaced by DaemonSet Pods + +## Troubleshooting + +### DaemonSet Pods not created + +Check if nodes match the affinity rules: + +```bash +kubectl get nodes --show-labels | grep +``` + +### Mount Pods on unexpected nodes + +Verify the nodeAffinity configuration: + +```bash +kubectl get storageclass -o yaml | grep -A 10 nodeAffinity +``` + +### References not cleaned up + +Check DaemonSet annotations: + +```bash +kubectl get daemonset -n kube-system -o jsonpath='{.items[*].metadata.annotations}' +``` diff --git a/docs/en/guide/mount-pod-configuration.md b/docs/en/guide/mount-pod-configuration.md new file mode 100644 index 0000000000..ec8a81f566 --- /dev/null +++ b/docs/en/guide/mount-pod-configuration.md @@ -0,0 +1,385 @@ +# Mount Pod Configuration Guide + +JuiceFS CSI Driver provides flexible Mount Pod deployment options that can be configured per StorageClass. This allows you to optimize resource usage and performance based on your specific needs. + +## Overview + +JuiceFS CSI Driver supports three Mount Pod deployment modes: + +1. **Per-PVC Mode** (`per-pvc`): Creates a separate Mount Pod for each PVC +2. **Shared Pod Mode** (`shared-pod`): Shares Mount Pods across PVCs using the same StorageClass +3. 
**DaemonSet Mode** (`daemonset`): Deploys Mount Pods as DaemonSets with node affinity support + +## Configuration Methods + +### Global Defaults (Environment Variables) + +Set default behavior for all StorageClasses via environment variables in the CSI Driver: + +```yaml +env: + # Enable mount sharing (defaults to shared-pod mode) + - name: STORAGE_CLASS_SHARE_MOUNT + value: "true" +``` + +When `STORAGE_CLASS_SHARE_MOUNT` is enabled: +- Default mode is `shared-pod` +- DaemonSet mode can be configured per StorageClass via ConfigMap +- Per-PVC mode can still be explicitly configured via ConfigMap + +### Per-StorageClass Configuration (ConfigMap) + +Override global defaults and configure individual StorageClasses using a ConfigMap: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + # Default configuration for all StorageClasses + default: | + mode: shared-pod + + # Configuration for specific StorageClass + my-storage-class: | + mode: daemonset + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: juicefs/mount-node + operator: In + values: + - "true" +``` + +## Automatic Fallback Behavior + +When using DaemonSet mode, the CSI Driver automatically falls back to shared Pod mode if: + +1. The DaemonSet cannot schedule a Pod on the node due to nodeAffinity restrictions +2. The node has taints that prevent the DaemonSet Pod from being scheduled +3. The DaemonSet Pod fails to become ready within the timeout period + +This ensures that workloads can still mount volumes even if the DaemonSet configuration prevents Pods from running on certain nodes. + +## Mount Modes Explained + +### Per-PVC Mode + +Each PVC gets its own dedicated Mount Pod. + +**Advantages:** + +- Complete isolation between PVCs +- Simple troubleshooting +- Independent lifecycle management + +**Use Cases:** + +- Development environments +- Multi-tenant scenarios requiring strict isolation +- Applications with specific mount configurations + +**Configuration:** + +```yaml +mode: per-pvc +``` + +### Shared Pod Mode + +Multiple PVCs using the same StorageClass share Mount Pods. + +**Advantages:** + +- Reduced resource consumption +- Fewer Pods to manage +- Shared cache benefits + +**Use Cases:** + +- Production environments with many PVCs +- Clusters with resource constraints +- Applications with similar access patterns + +**Configuration:** + +```yaml +mode: shared-pod +``` + +### DaemonSet Mode + +Mount Pods are deployed as DaemonSets across selected nodes. + +**Advantages:** + +- Predictable Pod placement +- Node-level resource optimization +- Automatic scaling with node additions +- Centralized node affinity control + +**Use Cases:** + +- High-performance computing +- Dedicated Mount nodes +- GPU workloads +- Large-scale deployments + +**Configuration:** + +```yaml +mode: daemonset +nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-type + operator: In + values: + - compute +``` + +## Configuration Priority + +The system determines mount mode in this order: + +1. **ConfigMap with StorageClass name** - Highest priority +2. **ConfigMap default configuration** +3. **Global environment variables** - Lowest priority + +## Working with Existing StorageClasses + +The ConfigMap approach allows you to change mount behavior **without modifying existing StorageClasses**: + +1. Create the ConfigMap with desired configuration +2. 
New PVCs will use the new mount mode +3. Existing PVCs continue using their current Mount Pods + +## Examples + +### Example 1: Mixed Mode Deployment + +Different StorageClasses use different modes: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + # Default: shared pods for most workloads + default: | + mode: shared-pod + + # High-performance workloads use DaemonSet + high-performance-sc: | + mode: daemonset + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: workload-type + operator: In + values: + - compute + + # Development uses per-PVC for isolation + development-sc: | + mode: per-pvc +``` + +### Example 2: Gradual Migration + +Migrate from per-PVC to shared/DaemonSet mode: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + # Keep existing behavior for old StorageClasses + default: | + mode: per-pvc + + # New StorageClasses use shared mode + juicefs-sc-v2: | + mode: shared-pod + + # Critical workloads use DaemonSet + juicefs-sc-critical: | + mode: daemonset +``` + +### Example 3: Node-Specific DaemonSets + +Deploy Mount Pods on specific node types: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + # GPU workloads + gpu-storage: | + mode: daemonset + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: nvidia.com/gpu + operator: Exists + + # CPU-intensive workloads + cpu-storage: | + mode: daemonset + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node.kubernetes.io/instance-type + operator: In + values: + - c5.xlarge + - c5.2xlarge +``` + +## Monitoring and Management + +### List Mount Pods by Type + +```bash +# Per-PVC pods +kubectl get pods -n kube-system -l juicefs.com/mount-by=juicefs-csi-driver | grep -v "juicefs-.*-mount-ds" + +# Shared pods (look for pods with multiple PVC references) +kubectl get pods -n kube-system -l juicefs.com/mount-by=juicefs-csi-driver -o json | \ + jq '.items[] | select(.metadata.annotations | length > 1) | .metadata.name' + +# DaemonSet pods +kubectl get daemonset -n kube-system | grep juicefs +kubectl get pods -n kube-system -l juicefs.com/mount-by=juicefs-csi-driver | grep "juicefs-.*-mount-ds" +``` + +### Check Configuration + +```bash +# View current ConfigMap +kubectl get configmap juicefs-mount-config -n kube-system -o yaml + +# Check which mode a StorageClass is using +kubectl get configmap juicefs-mount-config -n kube-system -o jsonpath='{.data.my-storage-class}' +``` + +## Best Practices + +1. **Start with shared-Pod mode** for most workloads +2. **Use DaemonSet mode** for: + - High-performance requirements + - Predictable resource allocation + - Node-specific optimizations +3. **Use per-PVC mode** for: + - Development/testing + - Strict isolation requirements + - Troubleshooting +4. **Test configuration changes** in non-production first +5. **Monitor resource usage** after mode changes +6. **Document your configuration** choices + +## Troubleshooting + +### Mount Pods not created as expected + +1. Check ConfigMap exists and is valid: + +```bash +kubectl get configmap juicefs-mount-config -n kube-system +``` + +2. Verify CSI Driver can read ConfigMap: + +```bash +kubectl logs -n kube-system daemonset/juicefs-csi-node | grep "mount-config" +``` + +3. 
Check for syntax errors in ConfigMap: + +```bash +kubectl get configmap juicefs-mount-config -n kube-system -o yaml | \ + yq eval '.data.default' - | kubectl create --dry-run=client -f - +``` + +### DaemonSet Pods not scheduled + +1. Verify node affinity matches existing nodes: + +```bash +kubectl get nodes --show-labels +``` + +2. Check DaemonSet status: + +```bash +kubectl describe daemonset -n kube-system juicefs--mount-ds +``` + +### Switching modes for existing PVCs + +Existing PVCs continue using their current Mount Pods. To switch modes: + +1. Update ConfigMap with new configuration +2. Delete existing PVCs (ensure data is backed up) +3. Recreate PVCs to use new mode + +## Migration Guide + +### From Environment Variables to ConfigMap + +1. Create ConfigMap with current behavior: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: juicefs-mount-config + namespace: kube-system +data: + default: | + mode: shared-pod # or per-pvc/daemonset based on current env vars +``` + +2. Deploy ConfigMap: + +```bash +kubectl apply -f juicefs-mount-config.yaml +``` + +3. New PVCs will use ConfigMap configuration +4. Optionally remove environment variables from CSI Driver + +### From Per-PVC to Shared/DaemonSet + +1. Update ConfigMap for specific StorageClasses +2. New PVCs use new mode automatically +3. Optionally migrate existing PVCs during maintenance windows + +## Summary + +The Mount Pod configuration system provides: + +- **Flexibility**: Different modes for different workloads +- **Compatibility**: Works with existing StorageClasses +- **Simplicity**: Centralized configuration via ConfigMap +- **Power**: Fine-grained control with node affinity +- **Safety**: Non-disruptive to existing workloads + +Choose the appropriate mode based on your workload requirements, resource constraints, and operational preferences. diff --git a/pkg/config/config.go b/pkg/config/config.go index 0b295000f0..db1c153de2 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -50,7 +50,7 @@ var ( Webhook = false // start webhook server, used in sidecar mode or validating/mutating webhook ValidatingWebhook = false // start validating webhook, applicable to ee only Immutable = false // csi driver is running in an immutable environment - StorageClassShareMount = false // share mount pod for the same storage class + StorageClassShareMount = false // share mount pod for the same storage class (enables shared-pod and daemonset modes) AccessToKubelet = false // access kubelet or not DriverName = "csi.juicefs.com" diff --git a/pkg/config/mount_config.go b/pkg/config/mount_config.go new file mode 100644 index 0000000000..997c12c10e --- /dev/null +++ b/pkg/config/mount_config.go @@ -0,0 +1,206 @@ +/* +Copyright 2021 Juicedata Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog/v2" + + "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" +) + +const ( + MountConfigMapName = "juicefs-mount-config" + DefaultConfigKey = "default" +) + +// MountMode defines how mount pods are deployed +type MountMode string + +const ( + // MountModePVC creates a separate mount pod for each PVC + MountModePVC MountMode = "per-pvc" + // MountModeSharedPod creates shared mount pods per StorageClass + MountModeSharedPod MountMode = "shared-pod" + // MountModeDaemonSet creates DaemonSets for mount pods per StorageClass + MountModeDaemonSet MountMode = "daemonset" +) + +// MountConfig represents the complete mount configuration for a StorageClass +type MountConfig struct { + // Mode specifies how mount pods are deployed: per-pvc, shared-pod, or daemonset + Mode MountMode `yaml:"mode,omitempty"` + + // NodeAffinity is used when Mode is daemonset + NodeAffinity *corev1.NodeAffinity `yaml:"nodeAffinity,omitempty"` + + // Additional mount pod configurations can be added here in the future + // For example: + // Resources *corev1.ResourceRequirements `yaml:"resources,omitempty"` + // Tolerations []corev1.Toleration `yaml:"tolerations,omitempty"` + // Labels map[string]string `yaml:"labels,omitempty"` + // Annotations map[string]string `yaml:"annotations,omitempty"` +} + +// GetMountConfig retrieves the mount configuration for a given StorageClass +func GetMountConfig(ctx context.Context, client *k8sclient.K8sClient, storageClassName string) (*MountConfig, error) { + log := klog.NewKlogr().WithName("mount-config") + + // Start with global defaults from environment variables + defaultConfig := &MountConfig{ + Mode: getDefaultMountMode(), + } + + // Try to get the ConfigMap + configMap, err := client.GetConfigMap(ctx, MountConfigMapName, Namespace) + if err != nil { + if k8serrors.IsNotFound(err) { + log.V(1).Info("Mount ConfigMap not found, using global defaults", + "configMap", MountConfigMapName, "namespace", Namespace, "mode", defaultConfig.Mode) + return defaultConfig, nil + } + return nil, fmt.Errorf("failed to get mount ConfigMap: %v", err) + } + + // Try to get StorageClass-specific configuration + if configData, exists := configMap.Data[storageClassName]; exists { + log.V(1).Info("Found StorageClass-specific mount configuration", + "storageClass", storageClassName) + config, err := parseMountConfig(configData) + if err != nil { + log.Error(err, "Failed to parse StorageClass-specific configuration, using defaults", + "storageClass", storageClassName) + return defaultConfig, nil + } + // Fill in any missing values with defaults + if config.Mode == "" { + config.Mode = defaultConfig.Mode + } + return config, nil + } + + // Try default configuration from ConfigMap + if configData, exists := configMap.Data[DefaultConfigKey]; exists { + log.V(1).Info("Using default mount configuration from ConfigMap for StorageClass", + "storageClass", storageClassName) + config, err := parseMountConfig(configData) + if err != nil { + log.Error(err, "Failed to parse default configuration, using global defaults") + return defaultConfig, nil + } + // Fill in any missing values with defaults + if config.Mode == "" { + config.Mode = defaultConfig.Mode + } + return config, nil + } + + log.V(1).Info("No mount configuration found in ConfigMap, using global defaults", + "storageClass", storageClassName, "mode", defaultConfig.Mode) 
+ return defaultConfig, nil +} + +// parseMountConfig parses the configuration string into a MountConfig +func parseMountConfig(configData string) (*MountConfig, error) { + config := &MountConfig{} + if err := yaml.Unmarshal([]byte(configData), config); err != nil { + return nil, fmt.Errorf("failed to parse mount configuration: %v", err) + } + + // Validate mount mode + if config.Mode != "" && + config.Mode != MountModePVC && + config.Mode != MountModeSharedPod && + config.Mode != MountModeDaemonSet { + return nil, fmt.Errorf("invalid mount mode: %s", config.Mode) + } + + return config, nil +} + +// getDefaultMountMode returns the default mount mode based on environment variables +func getDefaultMountMode() MountMode { + // Check global environment variable settings + // When StorageClassShareMount is enabled, default to shared-pod + // DaemonSet mode is only used when explicitly configured via ConfigMap + if StorageClassShareMount { + return MountModeSharedPod + } + return MountModePVC +} + +// LoadMountConfig loads mount configuration for a JfsSetting +func LoadMountConfig(ctx context.Context, client *k8sclient.K8sClient, jfsSetting *JfsSetting) error { + log := klog.NewKlogr().WithName("mount-config") + + // Get StorageClass name from PV if available + storageClassName := "" + if jfsSetting.PV != nil && jfsSetting.PV.Spec.StorageClassName != "" { + storageClassName = jfsSetting.PV.Spec.StorageClassName + } else { + // For static provisioning or when PV is not available, + // use the unique ID as the key in ConfigMap + storageClassName = jfsSetting.UniqueId + } + + config, err := GetMountConfig(ctx, client, storageClassName) + if err != nil { + log.Error(err, "Failed to get mount configuration, using defaults", + "storageClass", storageClassName) + // Don't fail mount if ConfigMap is misconfigured + // Just proceed with defaults + config = &MountConfig{ + Mode: getDefaultMountMode(), + } + } + + // Store the mount mode and configuration in JfsSetting + jfsSetting.MountMode = string(config.Mode) + if config.Mode == MountModeDaemonSet && config.NodeAffinity != nil { + jfsSetting.StorageClassNodeAffinity = config.NodeAffinity + log.Info("Loaded mount configuration", + "storageClass", storageClassName, + "mode", config.Mode, + "hasNodeAffinity", true) + } else { + log.Info("Loaded mount configuration", + "storageClass", storageClassName, + "mode", config.Mode) + } + + return nil +} + +// ShouldUseDaemonSet checks if DaemonSet should be used for the given JfsSetting +func ShouldUseDaemonSet(jfsSetting *JfsSetting) bool { + return jfsSetting.MountMode == string(MountModeDaemonSet) +} + +// ShouldUseSharedPod checks if shared pod should be used for the given JfsSetting +func ShouldUseSharedPod(jfsSetting *JfsSetting) bool { + return jfsSetting.MountMode == string(MountModeSharedPod) +} + +// ShouldUsePVCPod checks if per-PVC pod should be used for the given JfsSetting +func ShouldUsePVCPod(jfsSetting *JfsSetting) bool { + return jfsSetting.MountMode == string(MountModePVC) || jfsSetting.MountMode == "" +} \ No newline at end of file diff --git a/pkg/config/mount_config_helper.go b/pkg/config/mount_config_helper.go new file mode 100644 index 0000000000..f95963f901 --- /dev/null +++ b/pkg/config/mount_config_helper.go @@ -0,0 +1,122 @@ +/* +Copyright 2021 Juicedata Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog/v2" + + "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" +) + +// Helper functions for DaemonSet node affinity configuration +// The main MountConfig struct is defined in mount_config.go + +// GetDaemonSetNodeAffinity retrieves the DaemonSet node affinity configuration for a given StorageClass +// It first checks for a StorageClass-specific configuration, then falls back to default +func GetDaemonSetNodeAffinity(ctx context.Context, client *k8sclient.K8sClient, storageClassName string) (*corev1.NodeAffinity, error) { + log := klog.NewKlogr().WithName("mount-config") + + // Try to get the ConfigMap + configMap, err := client.GetConfigMap(ctx, MountConfigMapName, Namespace) + if err != nil { + if k8serrors.IsNotFound(err) { + log.V(1).Info("Mount ConfigMap not found, using no node affinity", + "configMap", MountConfigMapName, "namespace", Namespace) + return nil, nil // No ConfigMap means no node affinity restrictions + } + return nil, fmt.Errorf("failed to get Mount ConfigMap: %v", err) + } + + // Try to get StorageClass-specific configuration + if configData, exists := configMap.Data[storageClassName]; exists { + log.V(1).Info("Found StorageClass-specific mount configuration", + "storageClass", storageClassName) + return parseDaemonSetNodeAffinity(configData) + } + + // Fall back to default configuration + if configData, exists := configMap.Data[DefaultConfigKey]; exists { + log.V(1).Info("Using default mount configuration for StorageClass", + "storageClass", storageClassName) + return parseDaemonSetNodeAffinity(configData) + } + + log.V(1).Info("No mount configuration found for StorageClass", + "storageClass", storageClassName) + return nil, nil +} + +// parseDaemonSetNodeAffinity parses the configuration string to extract NodeAffinity +func parseDaemonSetNodeAffinity(configData string) (*corev1.NodeAffinity, error) { + config := &MountConfig{} + if err := yaml.Unmarshal([]byte(configData), config); err != nil { + return nil, fmt.Errorf("failed to parse mount configuration: %v", err) + } + return config.NodeAffinity, nil +} + +// LoadDaemonSetNodeAffinity loads node affinity for a StorageClass from ConfigMap +// This is called when creating or updating a DaemonSet for mount pods +func LoadDaemonSetNodeAffinity(ctx context.Context, client *k8sclient.K8sClient, jfsSetting *JfsSetting) error { + log := klog.NewKlogr().WithName("mount-config") + + // This should only be called when mount sharing is enabled + // If we're here without mount sharing, it's a programming error + if !StorageClassShareMount { + log.Error(nil, "LoadDaemonSetNodeAffinity called but StorageClassShareMount is false - this should not happen") + return fmt.Errorf("LoadDaemonSetNodeAffinity called without mount sharing enabled") + } + + // Skip if node affinity already set (from StorageClass parameters) + if jfsSetting.StorageClassNodeAffinity != nil { + log.V(1).Info("Node affinity already set from StorageClass parameters") + return nil 
+ } + + // Get StorageClass name from PV if available + storageClassName := "" + if jfsSetting.PV != nil && jfsSetting.PV.Spec.StorageClassName != "" { + storageClassName = jfsSetting.PV.Spec.StorageClassName + } else { + // For static provisioning or when PV is not available, + // use the unique ID as the key in ConfigMap + storageClassName = jfsSetting.UniqueId + } + + nodeAffinity, err := GetDaemonSetNodeAffinity(ctx, client, storageClassName) + if err != nil { + log.Error(err, "Failed to get DaemonSet configuration", + "storageClass", storageClassName) + // Don't fail mount if ConfigMap is misconfigured + // Just proceed without node affinity + return nil + } + + jfsSetting.StorageClassNodeAffinity = nodeAffinity + if nodeAffinity != nil { + log.Info("Loaded node affinity from ConfigMap for DaemonSet deployment", + "storageClass", storageClassName) + } + + return nil +} \ No newline at end of file diff --git a/pkg/config/mount_config_test.go b/pkg/config/mount_config_test.go new file mode 100644 index 0000000000..69308d6caf --- /dev/null +++ b/pkg/config/mount_config_test.go @@ -0,0 +1,313 @@ +/* +Copyright 2021 Juicedata Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + + "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" +) + +func TestParseMountConfig(t *testing.T) { + tests := []struct { + name string + configStr string + wantMode MountMode + wantErr bool + }{ + { + name: "per-pvc mode", + configStr: `mode: per-pvc`, + wantMode: MountModePVC, + }, + { + name: "shared-pod mode", + configStr: `mode: shared-pod`, + wantMode: MountModeSharedPod, + }, + { + name: "daemonset mode with node affinity", + configStr: ` +mode: daemonset +nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: test-key + operator: In + values: + - test-value`, + wantMode: MountModeDaemonSet, + }, + { + name: "invalid mode", + configStr: `mode: invalid-mode`, + wantErr: true, + }, + { + name: "empty config uses default", + configStr: ``, + wantMode: "", // Will use default + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config, err := parseMountConfig(tt.configStr) + if (err != nil) != tt.wantErr { + t.Errorf("parseMountConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && config.Mode != tt.wantMode { + t.Errorf("parseMountConfig() mode = %v, want %v", config.Mode, tt.wantMode) + } + }) + } +} + +func TestGetMountConfig(t *testing.T) { + ctx := context.Background() + + tests := []struct { + name string + storageClassName string + configMap *corev1.ConfigMap + globalShareMount bool + wantMode MountMode + wantHasAffinity bool + }{ + { + name: "no configmap, use global per-pvc", + storageClassName: "test-sc", + configMap: nil, + globalShareMount: false, + wantMode: MountModePVC, + }, + { + name: "no 
configmap, use global shared-pod", + storageClassName: "test-sc", + configMap: nil, + globalShareMount: true, + wantMode: MountModeSharedPod, + }, + { + name: "configmap with default", + storageClassName: "test-sc", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: MountConfigMapName, + Namespace: Namespace, + }, + Data: map[string]string{ + DefaultConfigKey: `mode: shared-pod`, + }, + }, + wantMode: MountModeSharedPod, + }, + { + name: "configmap with specific storage class", + storageClassName: "test-sc", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: MountConfigMapName, + Namespace: Namespace, + }, + Data: map[string]string{ + DefaultConfigKey: `mode: per-pvc`, + "test-sc": ` +mode: daemonset +nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: test + operator: Exists`, + }, + }, + wantMode: MountModeDaemonSet, + wantHasAffinity: true, + }, + { + name: "configmap with invalid config falls back to default", + storageClassName: "test-sc", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: MountConfigMapName, + Namespace: Namespace, + }, + Data: map[string]string{ + "test-sc": `mode: invalid`, + }, + }, + globalShareMount: true, + wantMode: MountModeSharedPod, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set global variables + StorageClassShareMount = tt.globalShareMount + + // Create fake k8s client + var objects []runtime.Object + if tt.configMap != nil { + objects = append(objects, tt.configMap) + } + fakeClient := fake.NewSimpleClientset(objects...) + k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Get mount config + config, err := GetMountConfig(ctx, k8sClient, tt.storageClassName) + if err != nil { + t.Errorf("GetMountConfig() error = %v", err) + return + } + + if config.Mode != tt.wantMode { + t.Errorf("GetMountConfig() mode = %v, want %v", config.Mode, tt.wantMode) + } + + if tt.wantHasAffinity && config.NodeAffinity == nil { + t.Errorf("GetMountConfig() expected NodeAffinity but got nil") + } + }) + } +} + +func TestMountModeHelpers(t *testing.T) { + tests := []struct { + name string + mountMode string + wantDaemonSet bool + wantSharedPod bool + wantPVCPod bool + }{ + { + name: "daemonset mode", + mountMode: string(MountModeDaemonSet), + wantDaemonSet: true, + wantSharedPod: false, + wantPVCPod: false, + }, + { + name: "shared-pod mode", + mountMode: string(MountModeSharedPod), + wantDaemonSet: false, + wantSharedPod: true, + wantPVCPod: false, + }, + { + name: "per-pvc mode", + mountMode: string(MountModePVC), + wantDaemonSet: false, + wantSharedPod: false, + wantPVCPod: true, + }, + { + name: "empty mode defaults to per-pvc", + mountMode: "", + wantDaemonSet: false, + wantSharedPod: false, + wantPVCPod: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + setting := &JfsSetting{ + MountMode: tt.mountMode, + } + + if got := ShouldUseDaemonSet(setting); got != tt.wantDaemonSet { + t.Errorf("ShouldUseDaemonSet() = %v, want %v", got, tt.wantDaemonSet) + } + if got := ShouldUseSharedPod(setting); got != tt.wantSharedPod { + t.Errorf("ShouldUseSharedPod() = %v, want %v", got, tt.wantSharedPod) + } + if got := ShouldUsePVCPod(setting); got != tt.wantPVCPod { + t.Errorf("ShouldUsePVCPod() = %v, want %v", got, tt.wantPVCPod) + } + }) + } +} + +func TestLoadMountConfig(t *testing.T) { + ctx := context.Background() + + // Create test PV + pv := 
&corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv", + }, + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "test-sc", + }, + } + + // Create configmap + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: MountConfigMapName, + Namespace: Namespace, + }, + Data: map[string]string{ + "test-sc": ` +mode: daemonset +nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: test-key + operator: In + values: + - test-value`, + }, + } + + // Create fake k8s client + fakeClient := fake.NewSimpleClientset(configMap) + k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Test loading config + setting := &JfsSetting{ + PV: pv, + UniqueId: "test-unique-id", + } + + err := LoadMountConfig(ctx, k8sClient, setting) + if err != nil { + t.Errorf("LoadMountConfig() error = %v", err) + return + } + + if setting.MountMode != string(MountModeDaemonSet) { + t.Errorf("LoadMountConfig() mount mode = %v, want %v", setting.MountMode, MountModeDaemonSet) + } + + if setting.StorageClassNodeAffinity == nil { + t.Errorf("LoadMountConfig() expected NodeAffinity but got nil") + } +} \ No newline at end of file diff --git a/pkg/config/setting.go b/pkg/config/setting.go index 84a9b11542..2607dae372 100644 --- a/pkg/config/setting.go +++ b/pkg/config/setting.go @@ -93,6 +93,10 @@ type JfsSetting struct { PV *corev1.PersistentVolume `json:"-"` PVC *corev1.PersistentVolumeClaim `json:"-"` + + // Mount configuration + MountMode string `json:"-"` // per-pvc, shared-pod, or daemonset + StorageClassNodeAffinity *corev1.NodeAffinity `json:"-"` } func (s *JfsSetting) String() string { @@ -293,6 +297,18 @@ func ParseSetting(ctx context.Context, secrets, volCtx map[string]string, option } jfsSetting.DeletedDelay = delay } + + // Parse node affinity for DaemonSet deployment + // First check if it's in volCtx (for backward compatibility) + if volCtx["nodeAffinity"] != "" && StorageClassShareMount { + nodeAffinity := &corev1.NodeAffinity{} + if err := yaml.Unmarshal([]byte(volCtx["nodeAffinity"]), nodeAffinity); err != nil { + log.Error(err, "Failed to parse nodeAffinity", "nodeAffinity", volCtx["nodeAffinity"]) + return nil, fmt.Errorf("failed to parse nodeAffinity: %v", err) + } + jfsSetting.StorageClassNodeAffinity = nodeAffinity + log.V(1).Info("Parsed nodeAffinity for DaemonSet deployment from StorageClass", "nodeAffinity", nodeAffinity) + } var hostPaths []string if volCtx[common.MountPodHostPath] != "" { diff --git a/pkg/juicefs/juicefs.go b/pkg/juicefs/juicefs.go index 73c899ddac..92c4cd5a51 100644 --- a/pkg/juicefs/juicefs.go +++ b/pkg/juicefs/juicefs.go @@ -200,7 +200,8 @@ func NewJfsProvider(mounter *mount.SafeFormatAndMount, k8sClient *k8sclient.K8sC if config.ByProcess { mnt = podmount.NewProcessMount(*mounter) } else { - mnt = podmount.NewPodMount(k8sClient, *mounter) + // Use MountSelector to dynamically choose mount implementation based on configuration + mnt = podmount.NewMountSelector(k8sClient, *mounter) } uuidMaps := make(map[string]string) @@ -371,18 +372,18 @@ func (j *juicefs) genJfsSettings(ctx context.Context, volumeID string, target st } // getUniqueId: get UniqueId from volumeId (volumeHandle of PV) -// When STORAGE_CLASS_SHARE_MOUNT env is set: +// The uniqueId determines how mount pods are grouped: // -// in dynamic provision, UniqueId set as SC name -// if sc secrets is template. 
UniqueId set as volumeId -// in static provision, UniqueId set as volumeId -// -// When STORAGE_CLASS_SHARE_MOUNT env not set: -// -// UniqueId set as volumeId +// per-pvc mode: UniqueId set as volumeId (each PVC gets its own mount pod) +// shared-pod/daemonset mode in dynamic provision: UniqueId set as SC name +// (if sc secrets is template, UniqueId set as volumeId) +// shared-pod/daemonset mode in static provision: UniqueId set as volumeId func (j *juicefs) getUniqueId(ctx context.Context, volumeId string) (string, error) { log := util.GenLog(ctx, jfsLog, "getUniqueId") - if config.StorageClassShareMount && !config.ByProcess { + + // First check if we should use shared mount based on configuration + // This will be determined when loading mount config + if !config.ByProcess { pv, err := j.K8sClient.GetPersistentVolume(ctx, volumeId) // In static provision, volumeId may not be PV name, it is expected that PV cannot be found by volumeId if err != nil && !k8serrors.IsNotFound(err) { diff --git a/pkg/juicefs/mount/builder/daemonset.go b/pkg/juicefs/mount/builder/daemonset.go new file mode 100644 index 0000000000..6e1f5c5ff1 --- /dev/null +++ b/pkg/juicefs/mount/builder/daemonset.go @@ -0,0 +1,141 @@ +/* +Copyright 2021 Juicedata Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "fmt" + "strings" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/juicedata/juicefs-csi-driver/pkg/common" + "github.com/juicedata/juicefs-csi-driver/pkg/config" +) + +type DaemonSetBuilder struct { + BaseBuilder +} + +func NewDaemonSetBuilder(setting *config.JfsSetting, capacity int64) *DaemonSetBuilder { + return &DaemonSetBuilder{ + BaseBuilder: BaseBuilder{ + jfsSetting: setting, + capacity: capacity, + }, + } +} + +// NewMountDaemonSet generates a DaemonSet with juicefs client for storage class sharing +func (d *DaemonSetBuilder) NewMountDaemonSet(dsName string) (*appsv1.DaemonSet, error) { + podBuilder := NewPodBuilder(d.jfsSetting, d.capacity) + + // Create template pod for DaemonSet + pod := podBuilder.genCommonJuicePod(podBuilder.genCommonContainer) + pod.Spec.RestartPolicy = corev1.RestartPolicyAlways + + // Generate mount command + mountCmd := d.genMountCommand() + cmd := mountCmd + initCmd := d.genInitCommand() + if initCmd != "" { + cmd = strings.Join([]string{initCmd, mountCmd}, "\n") + } + pod.Spec.Containers[0].Command = []string{"sh", "-c", cmd} + pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, corev1.EnvVar{ + Name: "JFS_FOREGROUND", + Value: "1", + }) + + // Generate volumes and volumeMounts using PodBuilder + volumes, volumeMounts := podBuilder.genPodVolumes() + pod.Spec.Volumes = append(pod.Spec.Volumes, volumes...) + pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, volumeMounts...) + + // Add cache-dir hostpath & PVC volume + cacheVolumes, cacheVolumeMounts := podBuilder.genCacheDirVolumes() + pod.Spec.Volumes = append(pod.Spec.Volumes, cacheVolumes...) 
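
For reference, the per-StorageClass mount configuration that GetMountConfig and LoadMountConfig read (and that the tests above exercise) is a single ConfigMap whose data keys are StorageClass names plus a default entry. Below is a minimal, self-contained sketch under those assumptions: MountConfigMapName, Namespace and DefaultConfigKey are the pkg/config identifiers referenced in the tests, while the StorageClass name "fast-sc" and the label key "node-role/cache" are invented for illustration.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/juicedata/juicefs-csi-driver/pkg/config"
)

func main() {
	// Illustrative ConfigMap mirroring the fixtures in TestGetMountConfig:
	// the default entry applies to any StorageClass without its own key,
	// and a per-StorageClass entry can switch the mode and add a nodeAffinity.
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      config.MountConfigMapName, // constant from pkg/config
			Namespace: config.Namespace,
		},
		Data: map[string]string{
			config.DefaultConfigKey: `mode: shared-pod`,
			// "fast-sc" is a hypothetical StorageClass name used for illustration.
			"fast-sc": `
mode: daemonset
nodeAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
    - matchExpressions:
      - key: node-role/cache
        operator: Exists`,
		},
	}
	fmt.Printf("%s/%s drives mount-mode selection\n", cm.Namespace, cm.Name)
}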
+ pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, cacheVolumeMounts...) + + // Add mount path host path volume + mountVolumes, mountVolumeMounts := podBuilder.genHostPathVolumes() + pod.Spec.Volumes = append(pod.Spec.Volumes, mountVolumes...) + pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, mountVolumeMounts...) + + // Add users custom volumes, volumeMounts, volumeDevices + if d.jfsSetting.Attr.Volumes != nil { + pod.Spec.Volumes = append(pod.Spec.Volumes, d.jfsSetting.Attr.Volumes...) + } + if d.jfsSetting.Attr.VolumeMounts != nil { + pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, d.jfsSetting.Attr.VolumeMounts...) + } + if d.jfsSetting.Attr.VolumeDevices != nil { + pod.Spec.Containers[0].VolumeDevices = append(pod.Spec.Containers[0].VolumeDevices, d.jfsSetting.Attr.VolumeDevices...) + } + + // Create DaemonSet + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: dsName, + Namespace: config.Namespace, + Labels: map[string]string{ + common.PodTypeKey: common.PodTypeValue, + common.PodUniqueIdLabelKey: d.jfsSetting.UniqueId, + common.PodJuiceHashLabelKey: d.jfsSetting.HashVal, + }, + Annotations: map[string]string{}, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + common.PodTypeKey: common.PodTypeValue, + common.PodUniqueIdLabelKey: d.jfsSetting.UniqueId, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + common.PodTypeKey: common.PodTypeValue, + common.PodUniqueIdLabelKey: d.jfsSetting.UniqueId, + common.PodJuiceHashLabelKey: d.jfsSetting.HashVal, + }, + Annotations: map[string]string{}, + }, + Spec: pod.Spec, + }, + UpdateStrategy: appsv1.DaemonSetUpdateStrategy{ + Type: appsv1.RollingUpdateDaemonSetStrategyType, + }, + }, + } + + // Add node affinity if needed for specific storage class + if d.jfsSetting.StorageClassNodeAffinity != nil { + if ds.Spec.Template.Spec.Affinity == nil { + ds.Spec.Template.Spec.Affinity = &corev1.Affinity{} + } + ds.Spec.Template.Spec.Affinity.NodeAffinity = d.jfsSetting.StorageClassNodeAffinity + } + + return ds, nil +} + +// GenDaemonSetNameByUniqueId generates DaemonSet name by unique ID +func GenDaemonSetNameByUniqueId(uniqueId string) string { + return fmt.Sprintf("juicefs-%s-mount-ds", uniqueId) +} \ No newline at end of file diff --git a/pkg/juicefs/mount/daemonset_mount.go b/pkg/juicefs/mount/daemonset_mount.go new file mode 100644 index 0000000000..1a34d335e1 --- /dev/null +++ b/pkg/juicefs/mount/daemonset_mount.go @@ -0,0 +1,602 @@ +/* +Copyright 2021 Juicedata Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mount + +import ( + "context" + "fmt" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + k8sMount "k8s.io/utils/mount" + + "github.com/juicedata/juicefs-csi-driver/pkg/common" + jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" + "github.com/juicedata/juicefs-csi-driver/pkg/juicefs/mount/builder" + "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" + "github.com/juicedata/juicefs-csi-driver/pkg/util" + "github.com/juicedata/juicefs-csi-driver/pkg/util/resource" +) + +// DaemonSetSchedulingError indicates that a DaemonSet cannot schedule on a specific node +type DaemonSetSchedulingError struct { + DaemonSetName string + NodeName string + Message string +} + +func (e *DaemonSetSchedulingError) Error() string { + return e.Message +} + +// IsDaemonSetSchedulingError checks if the error is a DaemonSet scheduling error +func IsDaemonSetSchedulingError(err error) bool { + _, ok := err.(*DaemonSetSchedulingError) + return ok +} + +type DaemonSetMount struct { + log klog.Logger + k8sMount.SafeFormatAndMount + K8sClient *k8sclient.K8sClient +} + +var _ MntInterface = &DaemonSetMount{} + +func NewDaemonSetMount(client *k8sclient.K8sClient, mounter k8sMount.SafeFormatAndMount) MntInterface { + return &DaemonSetMount{ + klog.NewKlogr().WithName("daemonset-mount"), + mounter, client} +} + +func (d *DaemonSetMount) JMount(ctx context.Context, appInfo *jfsConfig.AppInfo, jfsSetting *jfsConfig.JfsSetting) error { + d.log = util.GenLog(ctx, d.log, "JMount") + hashVal := jfsConfig.GenHashOfSetting(d.log, *jfsSetting) + jfsSetting.HashVal = hashVal + jfsSetting.UpgradeUUID = string(uuid.NewUUID()) + + var dsName string + var err error + + if err = func() error { + lock := jfsConfig.GetPodLock(hashVal) + lock.Lock() + defer lock.Unlock() + + dsName = d.genDaemonSetName(jfsSetting) + + // Create or update DaemonSet + err = d.createOrUpdateDaemonSet(ctx, dsName, jfsSetting) + if err != nil { + return err + } + + return nil + }(); err != nil { + return err + } + + // Wait for DaemonSet to be ready on the current node + err = d.waitUntilDaemonSetReady(ctx, dsName, jfsSetting) + if err != nil { + return err + } + + if jfsSetting.UUID != "" { + // Set uuid as annotation in DaemonSet for clean cache + err = d.setUUIDAnnotation(ctx, dsName, jfsSetting.UUID) + if err != nil { + return err + } + } + + return nil +} + +func (d *DaemonSetMount) GetMountRef(ctx context.Context, target, dsName string) (int, error) { + log := util.GenLog(ctx, d.log, "GetMountRef") + + // For DaemonSet, we track references differently + // Each PV using this DaemonSet will add an annotation + ds, err := d.K8sClient.GetDaemonSet(ctx, dsName, jfsConfig.Namespace) + if err != nil { + if k8serrors.IsNotFound(err) { + return 0, nil + } + log.Error(err, "Get DaemonSet error", "dsName", dsName) + return 0, err + } + + // Count references in annotations + refCount := 0 + referencePrefix := "juicefs-" + for k := range ds.Annotations { + if strings.HasPrefix(k, referencePrefix) { + refCount++ + } + } + + return refCount, nil +} + +func (d *DaemonSetMount) UmountTarget(ctx context.Context, target, dsName string) error { + log := util.GenLog(ctx, d.log, "UmountTarget") + + // Unmount the target + log.Info("lazy umount", "target", target) + if err := util.UmountPath(ctx, target, true); err != nil { + return err + } + + // Remove 
reference from DaemonSet + key := util.GetReferenceKey(target) + err := d.removeReference(ctx, dsName, key) + if err != nil { + log.Error(err, "Remove reference from DaemonSet error", "dsName", dsName) + return err + } + + // Check if DaemonSet has any remaining references + refCount, err := d.GetMountRef(ctx, target, dsName) + if err != nil { + return err + } + + // If no more references, we can delete the DaemonSet + if refCount == 0 { + log.Info("No more references, deleting DaemonSet", "dsName", dsName) + if err := d.K8sClient.DeleteDaemonSet(ctx, dsName, jfsConfig.Namespace); err != nil && !k8serrors.IsNotFound(err) { + log.Error(err, "Delete DaemonSet error", "dsName", dsName) + return err + } + } + + return nil +} + +func (d *DaemonSetMount) JUmount(ctx context.Context, target, podName string) error { + // For DaemonSet mount, podName might be the DaemonSet name or we need to find it + dsName := podName + if dsName == "" { + dsName = d.getDaemonSetNameFromTarget(ctx, target) + if dsName == "" { + return fmt.Errorf("cannot find DaemonSet for target %s", target) + } + } + return d.UmountTarget(ctx, target, dsName) +} + +func (d *DaemonSetMount) JCreateVolume(ctx context.Context, jfsSetting *jfsConfig.JfsSetting) error { + // Volume creation is not supported via DaemonSet + return fmt.Errorf("volume creation not supported via DaemonSet mount") +} + +func (d *DaemonSetMount) JDeleteVolume(ctx context.Context, jfsSetting *jfsConfig.JfsSetting) error { + // Volume deletion is not supported via DaemonSet + return fmt.Errorf("volume deletion not supported via DaemonSet mount") +} + +func (d *DaemonSetMount) AddRefOfMount(ctx context.Context, target string, podName string) error { + // For DaemonSet, we add reference as annotation + dsName := podName + if dsName == "" { + dsName = d.getDaemonSetNameFromTarget(ctx, target) + if dsName == "" { + return fmt.Errorf("cannot find DaemonSet for target %s", target) + } + } + key := util.GetReferenceKey(target) + return d.addReference(ctx, dsName, key, target) +} + +func (d *DaemonSetMount) CleanCache(ctx context.Context, image string, id string, volumeId string, cacheDirs []string) error { + // Cache cleaning implementation + // This would need to be implemented based on your cache cleaning strategy + log := util.GenLog(ctx, d.log, "CleanCache") + log.Info("Cache cleaning requested", "volumeId", volumeId) + // For now, return nil as cache cleaning might be handled differently for DaemonSets + return nil +} + +func (d *DaemonSetMount) genDaemonSetName(jfsSetting *jfsConfig.JfsSetting) string { + return builder.GenDaemonSetNameByUniqueId(jfsSetting.UniqueId) +} + +func (d *DaemonSetMount) createOrUpdateDaemonSet(ctx context.Context, dsName string, jfsSetting *jfsConfig.JfsSetting) error { + log := util.GenLog(ctx, d.log, "createOrUpdateDaemonSet") + + // Load DaemonSet configuration from ConfigMap if not already set + if err := jfsConfig.LoadDaemonSetNodeAffinity(ctx, d.K8sClient, jfsSetting); err != nil { + log.Error(err, "Failed to load DaemonSet node affinity, proceeding without it") + } + + r := builder.NewDaemonSetBuilder(jfsSetting, 0) + secret := r.NewSecret() + builder.SetPVAsOwner(&secret, jfsSetting.PV) + key := util.GetReferenceKey(jfsSetting.TargetPath) + + // Check if DaemonSet exists + existingDS, err := d.K8sClient.GetDaemonSet(ctx, dsName, jfsConfig.Namespace) + if err != nil && !k8serrors.IsNotFound(err) { + log.Error(err, "Get DaemonSet error", "dsName", dsName) + return err + } + + // Create or update secret + if err := 
resource.CreateOrUpdateSecret(ctx, d.K8sClient, &secret); err != nil { + return err + } + + if k8serrors.IsNotFound(err) { + // DaemonSet doesn't exist, create it + log.Info("Creating new DaemonSet", "dsName", dsName) + newDS, err := r.NewMountDaemonSet(dsName) + if err != nil { + log.Error(err, "Generate DaemonSet error", "dsName", dsName) + return err + } + + // Add reference annotation + newDS.Annotations[key] = jfsSetting.TargetPath + + if _, err := d.K8sClient.CreateDaemonSet(ctx, newDS); err != nil { + log.Error(err, "Create DaemonSet error", "dsName", dsName) + return err + } + } else { + // DaemonSet exists, add reference + log.Info("DaemonSet exists, adding reference", "dsName", dsName) + + // Check if hash matches + if existingDS.Labels[common.PodJuiceHashLabelKey] != jfsSetting.HashVal { + log.Info("Hash mismatch, updating DaemonSet", "dsName", dsName, + "oldHash", existingDS.Labels[common.PodJuiceHashLabelKey], + "newHash", jfsSetting.HashVal) + + // Update DaemonSet with new configuration + newDS, err := r.NewMountDaemonSet(dsName) + if err != nil { + return err + } + + // Preserve existing annotations + referencePrefix := "juicefs-" + for k, v := range existingDS.Annotations { + if strings.HasPrefix(k, referencePrefix) { + newDS.Annotations[k] = v + } + } + // Add new reference + newDS.Annotations[key] = jfsSetting.TargetPath + + // Update DaemonSet + existingDS.Spec = newDS.Spec + existingDS.Labels = newDS.Labels + existingDS.Annotations = newDS.Annotations + + if err := d.K8sClient.UpdateDaemonSet(ctx, existingDS); err != nil { + log.Error(err, "Update DaemonSet error", "dsName", dsName) + return err + } + } else { + // Just add reference + if err := d.addReference(ctx, dsName, key, jfsSetting.TargetPath); err != nil { + return err + } + } + } + + return nil +} + +func (d *DaemonSetMount) waitUntilDaemonSetReady(ctx context.Context, dsName string, jfsSetting *jfsConfig.JfsSetting) error { + log := util.GenLog(ctx, d.log, "waitUntilDaemonSetReady") + + // First, check if the DaemonSet can schedule a pod on this node + canSchedule, err := d.canScheduleOnNode(ctx, dsName) + if err != nil { + log.Error(err, "Failed to check if DaemonSet can schedule on node") + // Continue anyway, might be a transient error + } + + if !canSchedule { + // DaemonSet cannot schedule on this node due to nodeAffinity + // Return a specific error that can be handled by the caller + log.Info("DaemonSet cannot schedule on this node due to nodeAffinity, need fallback", + "dsName", dsName, "nodeName", jfsConfig.NodeName) + return &DaemonSetSchedulingError{ + DaemonSetName: dsName, + NodeName: jfsConfig.NodeName, + Message: "DaemonSet cannot schedule on this node due to nodeAffinity restrictions", + } + } + + // Wait for DaemonSet to have pods ready on current node + timeout := 5 * time.Minute + waitCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + for { + select { + case <-waitCtx.Done(): + // Timeout - could be because pod cannot be scheduled on this node + return &DaemonSetSchedulingError{ + DaemonSetName: dsName, + NodeName: jfsConfig.NodeName, + Message: fmt.Sprintf("timeout waiting for DaemonSet pod to be ready on node %s", jfsConfig.NodeName), + } + default: + ds, err := d.K8sClient.GetDaemonSet(waitCtx, dsName, jfsConfig.Namespace) + if err != nil { + log.Error(err, "Get DaemonSet error", "dsName", dsName) + time.Sleep(2 * time.Second) + continue + } + + // Check if DaemonSet has pods scheduled on current node + labelSelector := &metav1.LabelSelector{ + MatchLabels: 
map[string]string{ + common.PodTypeKey: common.PodTypeValue, + common.PodUniqueIdLabelKey: jfsSetting.UniqueId, + }, + } + + pods, err := d.K8sClient.ListPod(waitCtx, jfsConfig.Namespace, labelSelector, nil) + if err != nil { + log.Error(err, "List pods error") + time.Sleep(2 * time.Second) + continue + } + + // Find pod on current node + for _, pod := range pods { + if pod.Spec.NodeName == jfsConfig.NodeName { + // Check if pod is ready + if resource.IsPodReady(&pod) { + log.Info("DaemonSet pod is ready on node", "podName", pod.Name, "nodeName", jfsConfig.NodeName) + + // Update mount path from the pod + mountPath, _, err := util.GetMountPathOfPod(pod) + if err != nil { + log.Error(err, "Get mount path from pod error", "podName", pod.Name) + return err + } + jfsSetting.MountPath = mountPath + return nil + } + } + } + + log.V(1).Info("Waiting for DaemonSet pod to be ready", "dsName", dsName, "desiredNumberScheduled", ds.Status.DesiredNumberScheduled, "numberReady", ds.Status.NumberReady) + time.Sleep(2 * time.Second) + } + } +} + +func (d *DaemonSetMount) addReference(ctx context.Context, dsName, key, value string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + ds, err := d.K8sClient.GetDaemonSet(ctx, dsName, jfsConfig.Namespace) + if err != nil { + return err + } + + if ds.Annotations == nil { + ds.Annotations = make(map[string]string) + } + ds.Annotations[key] = value + + return d.K8sClient.UpdateDaemonSet(ctx, ds) + }) +} + +func (d *DaemonSetMount) removeReference(ctx context.Context, dsName, key string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + ds, err := d.K8sClient.GetDaemonSet(ctx, dsName, jfsConfig.Namespace) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil + } + return err + } + + if ds.Annotations != nil { + delete(ds.Annotations, key) + } + + return d.K8sClient.UpdateDaemonSet(ctx, ds) + }) +} + +func (d *DaemonSetMount) setUUIDAnnotation(ctx context.Context, dsName, uuid string) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + ds, err := d.K8sClient.GetDaemonSet(ctx, dsName, jfsConfig.Namespace) + if err != nil { + return err + } + + if ds.Annotations == nil { + ds.Annotations = make(map[string]string) + } + ds.Annotations[common.JuiceFSUUID] = uuid + + return d.K8sClient.UpdateDaemonSet(ctx, ds) + }) +} + +func (d *DaemonSetMount) getDaemonSetNameFromTarget(ctx context.Context, target string) string { + // List all DaemonSets and find the one with this target + dsList, err := d.K8sClient.ListDaemonSet(ctx, jfsConfig.Namespace, nil) + if err != nil { + return "" + } + + key := util.GetReferenceKey(target) + for _, ds := range dsList { + if ds.Annotations != nil && ds.Annotations[key] == target { + return ds.Name + } + } + + return "" +} + +// canScheduleOnNode checks if a DaemonSet can schedule a pod on the current node +func (d *DaemonSetMount) canScheduleOnNode(ctx context.Context, dsName string) (bool, error) { + log := util.GenLog(ctx, d.log, "canScheduleOnNode") + + // Get the DaemonSet + ds, err := d.K8sClient.GetDaemonSet(ctx, dsName, jfsConfig.Namespace) + if err != nil { + return false, err + } + + // Get the current node + node, err := d.K8sClient.GetNode(ctx, jfsConfig.NodeName) + if err != nil { + log.Error(err, "Failed to get node", "nodeName", jfsConfig.NodeName) + return false, err + } + + // Check if the node matches the DaemonSet's nodeAffinity + if ds.Spec.Template.Spec.Affinity != nil && ds.Spec.Template.Spec.Affinity.NodeAffinity != nil { + 
nodeAffinity := ds.Spec.Template.Spec.Affinity.NodeAffinity + + // Check required node affinity + if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + matches := false + for _, term := range nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { + if nodeMatchesSelectorTerm(node, &term) { + matches = true + break + } + } + if !matches { + log.Info("Node does not match DaemonSet's required node affinity", + "nodeName", jfsConfig.NodeName, "dsName", dsName) + return false, nil + } + } + } + + // Check if the node has any taints that would prevent scheduling + // (This is a simplified check - a full implementation would need to check tolerations) + if len(node.Spec.Taints) > 0 && len(ds.Spec.Template.Spec.Tolerations) == 0 { + for _, taint := range node.Spec.Taints { + if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute { + log.Info("Node has taints that prevent scheduling", + "nodeName", jfsConfig.NodeName, "taint", taint) + return false, nil + } + } + } + + return true, nil +} + +// nodeMatchesSelectorTerm checks if a node matches a node selector term +func nodeMatchesSelectorTerm(node *corev1.Node, term *corev1.NodeSelectorTerm) bool { + // Check match expressions + for _, expr := range term.MatchExpressions { + if !nodeMatchesExpression(node, &expr) { + return false + } + } + + // Check match fields + for _, field := range term.MatchFields { + if !nodeMatchesFieldSelector(node, &field) { + return false + } + } + + return true +} + +// nodeMatchesExpression checks if a node matches a label selector requirement +func nodeMatchesExpression(node *corev1.Node, expr *corev1.NodeSelectorRequirement) bool { + value, exists := node.Labels[expr.Key] + + switch expr.Operator { + case corev1.NodeSelectorOpIn: + if !exists { + return false + } + for _, v := range expr.Values { + if value == v { + return true + } + } + return false + case corev1.NodeSelectorOpNotIn: + if !exists { + return true + } + for _, v := range expr.Values { + if value == v { + return false + } + } + return true + case corev1.NodeSelectorOpExists: + return exists + case corev1.NodeSelectorOpDoesNotExist: + return !exists + case corev1.NodeSelectorOpGt, corev1.NodeSelectorOpLt: + // These operators are typically used for numeric comparisons + // For simplicity, we're not implementing them here + return true + default: + return false + } +} + +// nodeMatchesFieldSelector checks if a node matches a field selector +func nodeMatchesFieldSelector(node *corev1.Node, field *corev1.NodeSelectorRequirement) bool { + var value string + switch field.Key { + case "metadata.name": + value = node.Name + // Add more field selectors as needed + default: + return false + } + + switch field.Operator { + case corev1.NodeSelectorOpIn: + for _, v := range field.Values { + if value == v { + return true + } + } + return false + case corev1.NodeSelectorOpNotIn: + for _, v := range field.Values { + if value == v { + return false + } + } + return true + default: + return false + } +} \ No newline at end of file diff --git a/pkg/juicefs/mount/daemonset_mount_simple_test.go b/pkg/juicefs/mount/daemonset_mount_simple_test.go new file mode 100644 index 0000000000..8734892dc4 --- /dev/null +++ b/pkg/juicefs/mount/daemonset_mount_simple_test.go @@ -0,0 +1,360 @@ +/* +Copyright 2021 Juicedata Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "context" + "strings" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8sMount "k8s.io/utils/mount" + + "github.com/juicedata/juicefs-csi-driver/pkg/common" + jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" + "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" + "github.com/juicedata/juicefs-csi-driver/pkg/util" +) + +func TestDaemonSetMount_CreateOrUpdate(t *testing.T) { + ctx := context.Background() + jfsConfig.Namespace = "test-ns" + + tests := []struct { + name string + jfsSetting *jfsConfig.JfsSetting + existingDS *appsv1.DaemonSet + expectHash string + expectRefs int + }{ + { + name: "create new daemonset", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-unique-id", + TargetPath: "/var/lib/kubelet/pods/test-pod/volumes/test-volume", + VolumeId: "test-volume", + Name: "test-name", + MetaUrl: "redis://localhost:6379/1", + Source: "test-source", + PV: &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv", + }, + }, + MountMode: string(jfsConfig.MountModeDaemonSet), + Attr: &jfsConfig.PodAttr{ + Image: "juicedata/mount:latest", + }, + }, + existingDS: nil, + expectRefs: 1, + }, + { + name: "add reference to existing daemonset", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-unique-id", + TargetPath: "/var/lib/kubelet/pods/test-pod/volumes/test-volume2", + VolumeId: "test-volume", + Name: "test-name", + MetaUrl: "redis://localhost:6379/1", + Source: "test-source", + HashVal: "test-hash", + PV: &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv", + }, + }, + MountMode: string(jfsConfig.MountModeDaemonSet), + Attr: &jfsConfig.PodAttr{ + Image: "juicedata/mount:latest", + }, + }, + existingDS: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "juicefs-test-unique-id-mount-ds", + Namespace: "test-ns", + Labels: map[string]string{ + common.PodJuiceHashLabelKey: "test-hash", + }, + Annotations: map[string]string{ + util.GetReferenceKey("/existing/path"): "/existing/path", + }, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + common.PodUniqueIdLabelKey: "test-unique-id", + }, + }, + }, + }, + expectHash: "test-hash", + expectRefs: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake k8s client + var objects []runtime.Object + if tt.existingDS != nil { + objects = append(objects, tt.existingDS) + } + + fakeClient := fake.NewSimpleClientset(objects...) 
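
To make the annotation-based bookkeeping above concrete: every bind target served by a shared DaemonSet mount is recorded as one annotation whose key comes from util.GetReferenceKey and whose value is the target path, and GetMountRef counts those keys by their "juicefs-" prefix. A small sketch of that accounting follows; the target paths are hypothetical.

package main

import (
	"fmt"
	"strings"

	"github.com/juicedata/juicefs-csi-driver/pkg/util"
)

func main() {
	// One annotation per bind target: key = util.GetReferenceKey(target),
	// value = the target path itself (as addReference does above).
	annotations := map[string]string{}
	for _, target := range []string{
		"/var/lib/kubelet/pods/pod-a/volumes/kubernetes.io~csi/pvc-1/mount", // hypothetical paths
		"/var/lib/kubelet/pods/pod-b/volumes/kubernetes.io~csi/pvc-1/mount",
	} {
		annotations[util.GetReferenceKey(target)] = target
	}

	// GetMountRef counts the "juicefs-" keys; when the count drops to zero,
	// UmountTarget deletes the DaemonSet.
	refs := 0
	for k := range annotations {
		if strings.HasPrefix(k, "juicefs-") {
			refs++
		}
	}
	fmt.Println("references:", refs)
}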
+ k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Create DaemonSetMount + mounter := &k8sMount.FakeMounter{} + d := NewDaemonSetMount(k8sClient, k8sMount.SafeFormatAndMount{ + Interface: mounter, + Exec: nil, + }).(*DaemonSetMount) + + // Generate hash if not set + if tt.jfsSetting.HashVal == "" { + hashVal := jfsConfig.GenHashOfSetting(d.log, *tt.jfsSetting) + tt.jfsSetting.HashVal = hashVal + } + + // Call createOrUpdateDaemonSet + dsName := d.genDaemonSetName(tt.jfsSetting) + err := d.createOrUpdateDaemonSet(ctx, dsName, tt.jfsSetting) + if err != nil { + t.Errorf("createOrUpdateDaemonSet() error = %v", err) + return + } + + // Check DaemonSet was created/updated + ds, err := k8sClient.GetDaemonSet(ctx, dsName, jfsConfig.Namespace) + if err != nil { + t.Errorf("GetDaemonSet() error = %v", err) + return + } + + // Check hash + if tt.expectHash != "" && ds.Labels[common.PodJuiceHashLabelKey] != tt.expectHash { + t.Errorf("DaemonSet hash = %v, want %v", ds.Labels[common.PodJuiceHashLabelKey], tt.expectHash) + } + + // Debug: print annotations + t.Logf("DaemonSet annotations: %v", ds.Annotations) + + // Count references + refCount := 0 + referencePrefix := "juicefs-" + t.Logf("Looking for prefix: %v", referencePrefix) + for k := range ds.Annotations { + if strings.HasPrefix(k, referencePrefix) { + refCount++ + t.Logf("Found reference: %v", k) + } + } + + if refCount != tt.expectRefs { + t.Errorf("Reference count = %v, want %v", refCount, tt.expectRefs) + } + }) + } +} + +func TestDaemonSetMount_References(t *testing.T) { + ctx := context.Background() + jfsConfig.Namespace = "test-ns" + + // Create fake k8s client with a DaemonSet + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + Annotations: map[string]string{ + util.GetReferenceKey("/path1"): "/path1", + }, + }, + } + + fakeClient := fake.NewSimpleClientset(ds) + k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Create DaemonSetMount + mounter := &k8sMount.FakeMounter{} + d := NewDaemonSetMount(k8sClient, k8sMount.SafeFormatAndMount{ + Interface: mounter, + Exec: nil, + }).(*DaemonSetMount) + + // Test adding reference + err := d.addReference(ctx, "test-ds", util.GetReferenceKey("/path2"), "/path2") + if err != nil { + t.Errorf("addReference() error = %v", err) + } + + // Check reference was added + refCount, err := d.GetMountRef(ctx, "/any", "test-ds") + if err != nil { + t.Errorf("GetMountRef() error = %v", err) + } + if refCount != 2 { + t.Errorf("Reference count after add = %v, want 2", refCount) + } + + // Test removing reference + err = d.removeReference(ctx, "test-ds", util.GetReferenceKey("/path1")) + if err != nil { + t.Errorf("removeReference() error = %v", err) + } + + // Check reference was removed + refCount, err = d.GetMountRef(ctx, "/any", "test-ds") + if err != nil { + t.Errorf("GetMountRef() error = %v", err) + } + if refCount != 1 { + t.Errorf("Reference count after remove = %v, want 1", refCount) + } +} + +func TestMountSelector_ConfigFallback(t *testing.T) { + ctx := context.Background() + jfsConfig.Namespace = "test-ns" + + tests := []struct { + name string + configMap *corev1.ConfigMap + storageClassName string + globalShareMount bool + wantMode jfsConfig.MountMode + }{ + { + name: "no configmap, fallback to global per-pvc", + configMap: nil, + storageClassName: "test-sc", + globalShareMount: false, + wantMode: jfsConfig.MountModePVC, + }, + { + name: "no configmap, fallback to global shared-pod", + 
configMap: nil, + storageClassName: "test-sc", + globalShareMount: true, + wantMode: jfsConfig.MountModeSharedPod, + }, + { + name: "invalid config in configmap, fallback to global", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: jfsConfig.MountConfigMapName, + Namespace: jfsConfig.Namespace, + }, + Data: map[string]string{ + "test-sc": `mode: invalid-mode`, + }, + }, + storageClassName: "test-sc", + globalShareMount: true, + wantMode: jfsConfig.MountModeSharedPod, + }, + { + name: "configmap overrides global", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: jfsConfig.MountConfigMapName, + Namespace: jfsConfig.Namespace, + }, + Data: map[string]string{ + "test-sc": `mode: per-pvc`, + }, + }, + storageClassName: "test-sc", + globalShareMount: true, + wantMode: jfsConfig.MountModePVC, + }, + { + name: "use default key when storage class not in configmap", + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: jfsConfig.MountConfigMapName, + Namespace: jfsConfig.Namespace, + }, + Data: map[string]string{ + jfsConfig.DefaultConfigKey: `mode: daemonset`, + }, + }, + storageClassName: "unknown-sc", + globalShareMount: false, + wantMode: jfsConfig.MountModeDaemonSet, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set global variables + jfsConfig.StorageClassShareMount = tt.globalShareMount + + // Create fake k8s client + var objects []runtime.Object + if tt.configMap != nil { + objects = append(objects, tt.configMap) + } + + fakeClient := fake.NewSimpleClientset(objects...) + k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Create JfsSetting + jfsSetting := &jfsConfig.JfsSetting{ + UniqueId: "test-id", + VolumeId: "test-volume", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: tt.storageClassName, + }, + }, + } + + // Load mount config + err := jfsConfig.LoadMountConfig(ctx, k8sClient, jfsSetting) + if err != nil { + t.Errorf("LoadMountConfig() error = %v", err) + return + } + + // Check mount mode + actualMode := jfsConfig.MountMode(jfsSetting.MountMode) + if actualMode == "" { + // Determine from helper functions + if jfsConfig.ShouldUseDaemonSet(jfsSetting) { + actualMode = jfsConfig.MountModeDaemonSet + } else if jfsConfig.ShouldUseSharedPod(jfsSetting) { + actualMode = jfsConfig.MountModeSharedPod + } else { + actualMode = jfsConfig.MountModePVC + } + } + + if actualMode != tt.wantMode { + t.Errorf("Mount mode = %v, want %v", actualMode, tt.wantMode) + } + }) + } +} \ No newline at end of file diff --git a/pkg/juicefs/mount/daemonset_mount_test.go b/pkg/juicefs/mount/daemonset_mount_test.go new file mode 100644 index 0000000000..e53584fbc9 --- /dev/null +++ b/pkg/juicefs/mount/daemonset_mount_test.go @@ -0,0 +1,473 @@ +/* +Copyright 2021 Juicedata Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mount + +import ( + "context" + "testing" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8sMount "k8s.io/utils/mount" + + "github.com/juicedata/juicefs-csi-driver/pkg/common" + jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" + "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" + "github.com/juicedata/juicefs-csi-driver/pkg/util" +) + +func TestDaemonSetMount_JMount(t *testing.T) { + // Use a context with timeout for tests + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Set required global variables + jfsConfig.NodeName = "test-node" + jfsConfig.Namespace = "test-ns" + + tests := []struct { + name string + jfsSetting *jfsConfig.JfsSetting + existingDS *appsv1.DaemonSet + existingPod *corev1.Pod + wantErr bool + errContains string + }{ + { + name: "create new daemonset", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-unique-id", + TargetPath: "/var/lib/kubelet/pods/test-pod/volumes/test-volume", + VolumeId: "test-volume", + Name: "test-name", + MetaUrl: "redis://localhost:6379/1", + Source: "test-source", + PV: &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv", + }, + }, + MountMode: string(jfsConfig.MountModeDaemonSet), + Attr: &jfsConfig.PodAttr{ + Image: "juicedata/mount:latest", + }, + }, + existingDS: nil, + existingPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "juicefs-test-unique-id-mount-ds-12345", + Namespace: "test-ns", + Labels: map[string]string{ + common.PodTypeKey: common.PodTypeValue, + common.PodUniqueIdLabelKey: "test-unique-id", + }, + }, + Spec: corev1.PodSpec{ + NodeName: "test-node", + Containers: []corev1.Container{ + { + Name: "jfs-mount", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "jfs-dir", + MountPath: "/data/test-unique-id", + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "update existing daemonset with different hash", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-unique-id", + TargetPath: "/var/lib/kubelet/pods/test-pod/volumes/test-volume", + VolumeId: "test-volume", + Name: "test-name", + MetaUrl: "redis://localhost:6379/1", + Source: "test-source", + PV: &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv", + }, + }, + MountMode: string(jfsConfig.MountModeDaemonSet), + Attr: &jfsConfig.PodAttr{ + Image: "juicedata/mount:latest", + }, + }, + existingDS: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "juicefs-test-unique-id-mount-ds", + Namespace: "test-ns", + Labels: map[string]string{ + common.PodJuiceHashLabelKey: "old-hash", + }, + Annotations: map[string]string{}, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + common.PodUniqueIdLabelKey: "test-unique-id", + }, + }, + }, + }, + existingPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "juicefs-test-unique-id-mount-ds-12345", + Namespace: "test-ns", + Labels: map[string]string{ + common.PodTypeKey: common.PodTypeValue, + common.PodUniqueIdLabelKey: "test-unique-id", + }, + }, + Spec: corev1.PodSpec{ + NodeName: "test-node", + Containers: []corev1.Container{ + { + Name: "jfs-mount", + VolumeMounts: []corev1.VolumeMount{ + { + Name: 
"jfs-dir", + MountPath: "/data/test-unique-id", + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "add reference to existing daemonset with same hash", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-unique-id", + TargetPath: "/var/lib/kubelet/pods/test-pod/volumes/test-volume", + HashVal: "test-hash", + VolumeId: "test-volume", + Name: "test-name", + MetaUrl: "redis://localhost:6379/1", + Source: "test-source", + PV: &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv", + }, + }, + MountMode: string(jfsConfig.MountModeDaemonSet), + Attr: &jfsConfig.PodAttr{ + Image: "juicedata/mount:latest", + }, + }, + existingDS: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "juicefs-test-unique-id-mount-ds", + Namespace: "test-ns", + Labels: map[string]string{ + common.PodJuiceHashLabelKey: "test-hash", + }, + Annotations: map[string]string{ + util.GetReferenceKey("/existing/path"): "/existing/path", + }, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + common.PodUniqueIdLabelKey: "test-unique-id", + }, + }, + }, + }, + existingPod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "juicefs-test-unique-id-mount-ds-12345", + Namespace: "test-ns", + Labels: map[string]string{ + common.PodTypeKey: common.PodTypeValue, + common.PodUniqueIdLabelKey: "test-unique-id", + }, + }, + Spec: corev1.PodSpec{ + NodeName: "test-node", + Containers: []corev1.Container{ + { + Name: "jfs-mount", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "jfs-dir", + MountPath: "/data/test-unique-id", + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake k8s client + var objects []runtime.Object + if tt.existingDS != nil { + objects = append(objects, tt.existingDS) + } + if tt.existingPod != nil { + objects = append(objects, tt.existingPod) + } + + fakeClient := fake.NewSimpleClientset(objects...) 
+ k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Create DaemonSetMount + mounter := &k8sMount.FakeMounter{} + d := NewDaemonSetMount(k8sClient, k8sMount.SafeFormatAndMount{ + Interface: mounter, + Exec: nil, + }) + + // Call JMount + appInfo := &jfsConfig.AppInfo{} + err := d.JMount(ctx, appInfo, tt.jfsSetting) + + // Check error + if (err != nil) != tt.wantErr { + t.Errorf("JMount() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if err != nil && tt.errContains != "" && !contains(err.Error(), tt.errContains) { + t.Errorf("JMount() error = %v, want error containing %v", err, tt.errContains) + } + }) + } +} + +func TestDaemonSetMount_GetMountRef(t *testing.T) { + ctx := context.Background() + jfsConfig.Namespace = "test-ns" + + tests := []struct { + name string + dsName string + target string + existingDS *appsv1.DaemonSet + wantRefs int + wantErr bool + }{ + { + name: "daemonset not found", + dsName: "non-existent-ds", + target: "/test/target", + existingDS: nil, + wantRefs: 0, + wantErr: false, + }, + { + name: "daemonset with no references", + dsName: "test-ds", + target: "/test/target", + existingDS: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + Annotations: map[string]string{}, + }, + }, + wantRefs: 0, + wantErr: false, + }, + { + name: "daemonset with multiple references", + dsName: "test-ds", + target: "/test/target", + existingDS: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + Annotations: map[string]string{ + util.GetReferenceKey("/path1"): "/path1", + util.GetReferenceKey("/path2"): "/path2", + util.GetReferenceKey("/path3"): "/path3", + "other-annotation": "value", + }, + }, + }, + wantRefs: 3, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake k8s client + var objects []runtime.Object + if tt.existingDS != nil { + objects = append(objects, tt.existingDS) + } + + fakeClient := fake.NewSimpleClientset(objects...) 
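
canScheduleOnNode answers the same question ahead of time by evaluating the DaemonSet's required node affinity against the node's labels, one NodeSelectorRequirement at a time. A minimal sketch of the In-operator branch, reusing the label key/value from the TestLoadMountConfig fixture:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// matchesIn mirrors the NodeSelectorOpIn branch of nodeMatchesExpression above:
// the node label must exist and its value must be one of the listed values.
func matchesIn(nodeLabels map[string]string, req corev1.NodeSelectorRequirement) bool {
	value, exists := nodeLabels[req.Key]
	if !exists {
		return false
	}
	for _, v := range req.Values {
		if value == v {
			return true
		}
	}
	return false
}

func main() {
	labels := map[string]string{"test-key": "test-value"}
	req := corev1.NodeSelectorRequirement{
		Key:      "test-key",
		Operator: corev1.NodeSelectorOpIn,
		Values:   []string{"test-value"},
	}
	// true -> the DaemonSet pod may land on this node; false triggers the fallback path.
	fmt.Println(matchesIn(labels, req))
}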
+ k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Create DaemonSetMount + mounter := &k8sMount.FakeMounter{} + d := NewDaemonSetMount(k8sClient, k8sMount.SafeFormatAndMount{ + Interface: mounter, + Exec: nil, + }) + + // Get mount references + refs, err := d.GetMountRef(ctx, tt.target, tt.dsName) + + // Check error + if (err != nil) != tt.wantErr { + t.Errorf("GetMountRef() error = %v, wantErr %v", err, tt.wantErr) + return + } + + // Check reference count + if refs != tt.wantRefs { + t.Errorf("GetMountRef() refs = %v, want %v", refs, tt.wantRefs) + } + }) + } +} + +func TestDaemonSetMount_UmountTarget(t *testing.T) { + ctx := context.Background() + jfsConfig.Namespace = "test-ns" + + tests := []struct { + name string + dsName string + target string + existingDS *appsv1.DaemonSet + expectDelete bool + wantErr bool + }{ + { + name: "remove last reference and delete daemonset", + dsName: "test-ds", + target: "/test/target", + existingDS: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + Annotations: map[string]string{ + util.GetReferenceKey("/test/target"): "/test/target", + }, + }, + }, + expectDelete: true, + wantErr: false, + }, + { + name: "remove reference but keep daemonset", + dsName: "test-ds", + target: "/test/target", + existingDS: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + Annotations: map[string]string{ + util.GetReferenceKey("/test/target"): "/test/target", + util.GetReferenceKey("/other/path"): "/other/path", + }, + }, + }, + expectDelete: false, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fake k8s client + var objects []runtime.Object + if tt.existingDS != nil { + objects = append(objects, tt.existingDS) + } + + fakeClient := fake.NewSimpleClientset(objects...) + k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Create DaemonSetMount + mounter := &k8sMount.FakeMounter{} + d := NewDaemonSetMount(k8sClient, k8sMount.SafeFormatAndMount{ + Interface: mounter, + Exec: nil, + }) + + // Unmount target + err := d.UmountTarget(ctx, tt.target, tt.dsName) + + // Check error + if (err != nil) != tt.wantErr { + t.Errorf("UmountTarget() error = %v, wantErr %v", err, tt.wantErr) + return + } + + // Check if DaemonSet was deleted + _, err = k8sClient.GetDaemonSet(ctx, tt.dsName, jfsConfig.Namespace) + deleted := err != nil + if deleted != tt.expectDelete { + t.Errorf("DaemonSet deleted = %v, want %v", deleted, tt.expectDelete) + } + }) + } +} + +func contains(s, substr string) bool { + return len(s) >= len(substr) && s[:len(substr)] == substr || len(s) > len(substr) && contains(s[1:], substr) +} \ No newline at end of file diff --git a/pkg/juicefs/mount/mount_selector.go b/pkg/juicefs/mount/mount_selector.go new file mode 100644 index 0000000000..4ef4306f79 --- /dev/null +++ b/pkg/juicefs/mount/mount_selector.go @@ -0,0 +1,255 @@ +/* +Copyright 2021 Juicedata Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "context" + + "k8s.io/klog/v2" + k8sMount "k8s.io/utils/mount" + + jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" + "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" + "github.com/juicedata/juicefs-csi-driver/pkg/util" +) + +// MountSelector dynamically selects the appropriate mount interface based on configuration +type MountSelector struct { + log klog.Logger + k8sMount.SafeFormatAndMount + K8sClient *k8sclient.K8sClient + processMount MntInterface + podMount MntInterface + daemonMount MntInterface +} + +var _ MntInterface = &MountSelector{} + +// NewMountSelector creates a new mount selector that chooses the appropriate mount implementation +func NewMountSelector(client *k8sclient.K8sClient, mounter k8sMount.SafeFormatAndMount) MntInterface { + return &MountSelector{ + log: klog.NewKlogr().WithName("mount-selector"), + SafeFormatAndMount: mounter, + K8sClient: client, + processMount: nil, // Created on demand + podMount: nil, // Created on demand + daemonMount: nil, // Created on demand + } +} + +// selectMount chooses the appropriate mount interface based on JfsSetting configuration +func (m *MountSelector) selectMount(ctx context.Context, jfsSetting *jfsConfig.JfsSetting) MntInterface { + log := util.GenLog(ctx, m.log, "selectMount") + + // Load mount configuration from ConfigMap if not already loaded + if jfsSetting.MountMode == "" { + if err := jfsConfig.LoadMountConfig(ctx, m.K8sClient, jfsSetting); err != nil { + log.Error(err, "Failed to load mount configuration, using default") + } + } + + // Select mount implementation based on mode + switch { + case jfsConfig.ByProcess: + log.V(1).Info("Using process mount") + if m.processMount == nil { + m.processMount = NewProcessMount(m.SafeFormatAndMount) + } + return m.processMount + + case jfsConfig.ShouldUseDaemonSet(jfsSetting): + log.Info("Using DaemonSet mount", "uniqueId", jfsSetting.UniqueId) + if m.daemonMount == nil { + m.daemonMount = NewDaemonSetMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.daemonMount + + case jfsConfig.ShouldUseSharedPod(jfsSetting): + log.Info("Using shared pod mount", "uniqueId", jfsSetting.UniqueId) + if m.podMount == nil { + m.podMount = NewPodMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.podMount + + default: + log.V(1).Info("Using per-PVC pod mount", "volumeId", jfsSetting.VolumeId) + if m.podMount == nil { + m.podMount = NewPodMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.podMount + } +} + +// JMount mounts JuiceFS volume +func (m *MountSelector) JMount(ctx context.Context, appInfo *jfsConfig.AppInfo, jfsSetting *jfsConfig.JfsSetting) error { + log := util.GenLog(ctx, m.log, "JMount") + mnt := m.selectMount(ctx, jfsSetting) + + // Try to mount using the selected mount type + err := mnt.JMount(ctx, appInfo, jfsSetting) + + // If it's a DaemonSet scheduling error, fall back to shared pod mount + if IsDaemonSetSchedulingError(err) { + log.Info("DaemonSet cannot schedule on this node, falling back to shared pod mount", + "error", err, "uniqueId", jfsSetting.UniqueId) + + // Override the mount mode to shared-pod for this specific mount + originalMode := jfsSetting.MountMode + jfsSetting.MountMode = string(jfsConfig.MountModeSharedPod) + + // Use shared pod mount as fallback + if m.podMount == nil { + m.podMount = NewPodMount(m.K8sClient, m.SafeFormatAndMount) + } + + err = m.podMount.JMount(ctx, appInfo, jfsSetting) + + // 
Restore original mode (in case it's used elsewhere) + jfsSetting.MountMode = originalMode + + if err != nil { + log.Error(err, "Fallback to shared pod mount also failed") + return err + } + + log.Info("Successfully mounted using shared pod fallback", "uniqueId", jfsSetting.UniqueId) + return nil + } + + return err +} + +// GetMountRef gets mount references +func (m *MountSelector) GetMountRef(ctx context.Context, target, podName string) (int, error) { + // For GetMountRef, we need to determine which mount type is being used + // This is a bit tricky without the JfsSetting, so we check what exists + + // First check if it's a DaemonSet + if dsName := m.getDaemonSetNameFromPodName(podName); dsName != "" { + if m.daemonMount == nil { + m.daemonMount = NewDaemonSetMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.daemonMount.GetMountRef(ctx, target, dsName) + } + + // Otherwise use pod mount + if m.podMount == nil { + m.podMount = NewPodMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.podMount.GetMountRef(ctx, target, podName) +} + +// UmountTarget unmounts target +func (m *MountSelector) UmountTarget(ctx context.Context, target, podName string) error { + // Determine which mount type is being used + if dsName := m.getDaemonSetNameFromPodName(podName); dsName != "" { + if m.daemonMount == nil { + m.daemonMount = NewDaemonSetMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.daemonMount.UmountTarget(ctx, target, dsName) + } + + // Otherwise use pod mount + if m.podMount == nil { + m.podMount = NewPodMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.podMount.UmountTarget(ctx, target, podName) +} + +// JUmount unmounts JuiceFS volume +func (m *MountSelector) JUmount(ctx context.Context, target, podName string) error { + // Try to find if it's mounted by DaemonSet + if m.daemonMount == nil { + m.daemonMount = NewDaemonSetMount(m.K8sClient, m.SafeFormatAndMount) + } + + // Check if DaemonSet has this target + dsList, err := m.K8sClient.ListDaemonSet(ctx, jfsConfig.Namespace, nil) + if err == nil { + key := util.GetReferenceKey(target) + for _, ds := range dsList { + if ds.Annotations != nil && ds.Annotations[key] == target { + return m.daemonMount.JUmount(ctx, target, ds.Name) + } + } + } + + // Otherwise use pod mount + if m.podMount == nil { + m.podMount = NewPodMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.podMount.JUmount(ctx, target, podName) +} + + +// JCreateVolume creates JuiceFS volume (CE only) +func (m *MountSelector) JCreateVolume(ctx context.Context, jfsSetting *jfsConfig.JfsSetting) error { + // Volume creation always uses pod mount + if m.podMount == nil { + m.podMount = NewPodMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.podMount.JCreateVolume(ctx, jfsSetting) +} + +// JDeleteVolume deletes JuiceFS volume (CE only) +func (m *MountSelector) JDeleteVolume(ctx context.Context, jfsSetting *jfsConfig.JfsSetting) error { + // Volume deletion always uses pod mount + if m.podMount == nil { + m.podMount = NewPodMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.podMount.JDeleteVolume(ctx, jfsSetting) +} + +// AddRefOfMount adds reference of mount +func (m *MountSelector) AddRefOfMount(ctx context.Context, target string, podName string) error { + // Determine which mount type is being used + if dsName := m.getDaemonSetNameFromPodName(podName); dsName != "" { + if m.daemonMount == nil { + m.daemonMount = NewDaemonSetMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.daemonMount.AddRefOfMount(ctx, target, dsName) + } + + // Otherwise use pod 
mount + if m.podMount == nil { + m.podMount = NewPodMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.podMount.AddRefOfMount(ctx, target, podName) +} + +// CleanCache cleans cache +func (m *MountSelector) CleanCache(ctx context.Context, image string, id string, volumeId string, cacheDirs []string) error { + // For now, delegate to pod mount + if m.podMount == nil { + m.podMount = NewPodMount(m.K8sClient, m.SafeFormatAndMount) + } + return m.podMount.CleanCache(ctx, image, id, volumeId, cacheDirs) +} + +// getDaemonSetNameFromPodName tries to determine if this pod is managed by a DaemonSet +func (m *MountSelector) getDaemonSetNameFromPodName(podName string) string { + // DaemonSet pods have a specific naming pattern + // This is a simple heuristic, could be improved + if len(podName) > 7 && podName[len(podName)-7:len(podName)-6] == "-" { + // Try to get the DaemonSet name + // Format: juicefs--mount-ds- + if len(podName) > 10 && podName[len(podName)-10:len(podName)-6] == "-ds-" { + return podName[:len(podName)-6] + } + } + return "" +} \ No newline at end of file diff --git a/pkg/juicefs/mount/mount_selector_test.go b/pkg/juicefs/mount/mount_selector_test.go new file mode 100644 index 0000000000..9c486caaa5 --- /dev/null +++ b/pkg/juicefs/mount/mount_selector_test.go @@ -0,0 +1,424 @@ +/* +Copyright 2021 Juicedata Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mount + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + k8sMount "k8s.io/utils/mount" + + jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config" + "github.com/juicedata/juicefs-csi-driver/pkg/k8sclient" +) + +func TestMountSelector_SelectMount(t *testing.T) { + ctx := context.Background() + jfsConfig.Namespace = "test-ns" + + tests := []struct { + name string + jfsSetting *jfsConfig.JfsSetting + byProcess bool + configMap *corev1.ConfigMap + globalShareMount bool + wantMountType string // "process", "daemonset", "pod" + }{ + { + name: "process mount when ByProcess is true", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-id", + }, + byProcess: true, + wantMountType: "process", + }, + { + name: "daemonset mount from explicit mode", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-id", + MountMode: string(jfsConfig.MountModeDaemonSet), + }, + byProcess: false, + wantMountType: "daemonset", + }, + { + name: "shared pod mount from explicit mode", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-id", + MountMode: string(jfsConfig.MountModeSharedPod), + }, + byProcess: false, + wantMountType: "pod", + }, + { + name: "per-pvc pod mount from explicit mode", + jfsSetting: &jfsConfig.JfsSetting{ + VolumeId: "test-volume", + MountMode: string(jfsConfig.MountModePVC), + }, + byProcess: false, + wantMountType: "pod", + }, + { + name: "fallback to global shared pod when no mode specified", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-id", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "test-sc", + }, + }, + }, + byProcess: false, + globalShareMount: true, + wantMountType: "pod", + }, + { + name: "fallback to per-pvc when no configuration", + jfsSetting: &jfsConfig.JfsSetting{ + VolumeId: "test-volume", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "test-sc", + }, + }, + }, + byProcess: false, + globalShareMount: false, + wantMountType: "pod", + }, + { + name: "load from configmap - daemonset mode", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-id", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "test-sc", + }, + }, + }, + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: jfsConfig.MountConfigMapName, + Namespace: jfsConfig.Namespace, + }, + Data: map[string]string{ + "test-sc": `mode: daemonset`, + }, + }, + byProcess: false, + wantMountType: "daemonset", + }, + { + name: "load from configmap default - shared-pod mode", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-id", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "unknown-sc", + }, + }, + }, + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: jfsConfig.MountConfigMapName, + Namespace: jfsConfig.Namespace, + }, + Data: map[string]string{ + jfsConfig.DefaultConfigKey: `mode: shared-pod`, + }, + }, + byProcess: false, + wantMountType: "pod", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set global variables + jfsConfig.ByProcess = tt.byProcess + jfsConfig.StorageClassShareMount = tt.globalShareMount + + // Create fake k8s client + var objects []runtime.Object + if tt.configMap != nil { + objects = append(objects, tt.configMap) + } + + fakeClient := fake.NewSimpleClientset(objects...) 
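
The switch in selectMount ultimately reduces to the ShouldUse* predicates on JfsSetting.MountMode (the process-mount case is handled before them via jfsConfig.ByProcess). A short runnable sketch of how those predicates behave, including the empty-mode default asserted by TestMountModeHelpers:

package main

import (
	"fmt"

	jfsConfig "github.com/juicedata/juicefs-csi-driver/pkg/config"
)

func main() {
	for _, mode := range []string{
		string(jfsConfig.MountModeDaemonSet),
		string(jfsConfig.MountModeSharedPod),
		string(jfsConfig.MountModePVC),
		"", // unset: treated as per-pvc, as TestMountModeHelpers asserts
	} {
		s := &jfsConfig.JfsSetting{MountMode: mode}
		fmt.Printf("mode=%q daemonset=%v shared=%v per-pvc=%v\n",
			mode,
			jfsConfig.ShouldUseDaemonSet(s),
			jfsConfig.ShouldUseSharedPod(s),
			jfsConfig.ShouldUsePVCPod(s),
		)
	}
}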
+ k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Create MountSelector + mounter := &k8sMount.FakeMounter{} + m := NewMountSelector(k8sClient, k8sMount.SafeFormatAndMount{ + Interface: mounter, + Exec: nil, + }) + + // Select mount + selector := m.(*MountSelector) + mnt := selector.selectMount(ctx, tt.jfsSetting) + + // Check mount type + switch tt.wantMountType { + case "process": + if _, ok := mnt.(*ProcessMount); !ok { + t.Errorf("Expected ProcessMount, got %T", mnt) + } + case "daemonset": + if _, ok := mnt.(*DaemonSetMount); !ok { + t.Errorf("Expected DaemonSetMount, got %T", mnt) + } + case "pod": + if _, ok := mnt.(*PodMount); !ok { + t.Errorf("Expected PodMount, got %T", mnt) + } + } + }) + } +} + +func TestMountSelector_DaemonSetFallback(t *testing.T) { + ctx := context.Background() + jfsConfig.Namespace = "test-ns" + jfsConfig.NodeName = "test-node" + + // Create a test case where DaemonSet cannot schedule on node + jfsSetting := &jfsConfig.JfsSetting{ + UniqueId: "test-id", + MountMode: string(jfsConfig.MountModeDaemonSet), + VolumeId: "test-volume", + TargetPath: "/test/target", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "test-sc", + }, + }, + } + + // Create fake k8s client with a node that doesn't match DaemonSet affinity + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: jfsConfig.NodeName, + Labels: map[string]string{ + "node-type": "worker", + }, + }, + } + + fakeClient := fake.NewSimpleClientset(node) + k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Create MountSelector + mounter := &k8sMount.FakeMounter{} + m := NewMountSelector(k8sClient, k8sMount.SafeFormatAndMount{ + Interface: mounter, + Exec: nil, + }) + + // The JMount should fall back to shared pod mount when DaemonSet cannot schedule + // This is a simplified test - in reality we'd need to mock more of the DaemonSet behavior + selector := m.(*MountSelector) + mnt := selector.selectMount(ctx, jfsSetting) + + // Should initially select DaemonSet mount + if _, ok := mnt.(*DaemonSetMount); !ok { + t.Errorf("Expected DaemonSetMount initially, got %T", mnt) + } + + // In a real test, we would call JMount and verify it falls back to PodMount + // For now, we're just testing that the selection logic works +} + +func TestMountSelector_Fallback(t *testing.T) { + ctx := context.Background() + jfsConfig.Namespace = "test-ns" + + tests := []struct { + name string + jfsSetting *jfsConfig.JfsSetting + configMap *corev1.ConfigMap + globalShareMount bool + wantMountMode jfsConfig.MountMode + }{ + { + name: "no configmap, use global per-pvc default", + jfsSetting: &jfsConfig.JfsSetting{ + VolumeId: "test-volume", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "test-sc", + }, + }, + }, + configMap: nil, + globalShareMount: false, + wantMountMode: jfsConfig.MountModePVC, + }, + { + name: "no configmap, use global shared-pod", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-id", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "test-sc", + }, + }, + }, + configMap: nil, + globalShareMount: true, + wantMountMode: jfsConfig.MountModeSharedPod, + }, + { + name: "invalid config in configmap, fallback to global", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-id", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "test-sc", + }, + }, + }, + configMap: &corev1.ConfigMap{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: jfsConfig.MountConfigMapName, + Namespace: jfsConfig.Namespace, + }, + Data: map[string]string{ + "test-sc": `mode: invalid-mode`, + }, + }, + globalShareMount: true, + wantMountMode: jfsConfig.MountModeSharedPod, + }, + { + name: "empty config in configmap, fallback to global", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-id", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "test-sc", + }, + }, + }, + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: jfsConfig.MountConfigMapName, + Namespace: jfsConfig.Namespace, + }, + Data: map[string]string{ + "test-sc": ``, + }, + }, + globalShareMount: false, + wantMountMode: jfsConfig.MountModePVC, + }, + { + name: "configmap overrides global settings", + jfsSetting: &jfsConfig.JfsSetting{ + VolumeId: "test-volume", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "test-sc", + }, + }, + }, + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: jfsConfig.MountConfigMapName, + Namespace: jfsConfig.Namespace, + }, + Data: map[string]string{ + "test-sc": `mode: per-pvc`, + }, + }, + globalShareMount: true, + wantMountMode: jfsConfig.MountModePVC, + }, + { + name: "use default key when storage class not found", + jfsSetting: &jfsConfig.JfsSetting{ + UniqueId: "test-id", + PV: &corev1.PersistentVolume{ + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "unknown-sc", + }, + }, + }, + configMap: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: jfsConfig.MountConfigMapName, + Namespace: jfsConfig.Namespace, + }, + Data: map[string]string{ + jfsConfig.DefaultConfigKey: `mode: shared-pod`, + "other-sc": `mode: daemonset`, + }, + }, + globalShareMount: false, + wantMountMode: jfsConfig.MountModeSharedPod, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set global variables + jfsConfig.ByProcess = false + jfsConfig.StorageClassShareMount = tt.globalShareMount + + // Create fake k8s client + var objects []runtime.Object + if tt.configMap != nil { + objects = append(objects, tt.configMap) + } + + fakeClient := fake.NewSimpleClientset(objects...) 
+ k8sClient := &k8sclient.K8sClient{} + k8sClient.Interface = fakeClient + + // Load mount config + err := jfsConfig.LoadMountConfig(ctx, k8sClient, tt.jfsSetting) + if err != nil { + t.Errorf("LoadMountConfig() error = %v", err) + return + } + + // Check mount mode + actualMode := jfsConfig.MountMode(tt.jfsSetting.MountMode) + if actualMode == "" { + // Determine from helper functions + if jfsConfig.ShouldUseDaemonSet(tt.jfsSetting) { + actualMode = jfsConfig.MountModeDaemonSet + } else if jfsConfig.ShouldUseSharedPod(tt.jfsSetting) { + actualMode = jfsConfig.MountModeSharedPod + } else { + actualMode = jfsConfig.MountModePVC + } + } + + if actualMode != tt.wantMountMode { + t.Errorf("Mount mode = %v, want %v", actualMode, tt.wantMountMode) + } + }) + } +} \ No newline at end of file diff --git a/pkg/k8sclient/client.go b/pkg/k8sclient/client.go index fe5adcf156..bf68a0953a 100644 --- a/pkg/k8sclient/client.go +++ b/pkg/k8sclient/client.go @@ -408,6 +408,41 @@ func (k *K8sClient) GetDaemonSet(ctx context.Context, dsName, namespace string) return ds, nil } +func (k *K8sClient) CreateDaemonSet(ctx context.Context, ds *appsv1.DaemonSet) (*appsv1.DaemonSet, error) { + return k.AppsV1().DaemonSets(ds.Namespace).Create(ctx, ds, metav1.CreateOptions{}) +} + +func (k *K8sClient) UpdateDaemonSet(ctx context.Context, ds *appsv1.DaemonSet) error { + _, err := k.AppsV1().DaemonSets(ds.Namespace).Update(ctx, ds, metav1.UpdateOptions{}) + return err +} + +func (k *K8sClient) DeleteDaemonSet(ctx context.Context, dsName, namespace string) error { + return k.AppsV1().DaemonSets(namespace).Delete(ctx, dsName, metav1.DeleteOptions{}) +} + +func (k *K8sClient) ListDaemonSet(ctx context.Context, namespace string, labelSelector *metav1.LabelSelector) ([]appsv1.DaemonSet, error) { + listOptions := metav1.ListOptions{} + if labelSelector != nil { + labelMap, _ := metav1.LabelSelectorAsMap(labelSelector) + listOptions.LabelSelector = labels.SelectorFromSet(labelMap).String() + } + + dsList, err := k.AppsV1().DaemonSets(namespace).List(ctx, listOptions) + if err != nil { + return nil, err + } + return dsList.Items, nil +} + +func (k *K8sClient) GetConfigMap(ctx context.Context, name, namespace string) (*corev1.ConfigMap, error) { + return k.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) +} + +func (k *K8sClient) GetNode(ctx context.Context, name string) (*corev1.Node, error) { + return k.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}) +} + func (k *K8sClient) ExecuteInContainer(ctx context.Context, podName, namespace, containerName string, cmd []string) (stdout string, stderr string, err error) { const tty = false @@ -450,14 +485,6 @@ func execute(ctx context.Context, method string, url *url.URL, config *restclien }) } -func (k *K8sClient) GetConfigMap(ctx context.Context, cmName, namespace string) (*corev1.ConfigMap, error) { - cm, err := k.CoreV1().ConfigMaps(namespace).Get(ctx, cmName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return cm, nil -} - func (k *K8sClient) CreateConfigMap(ctx context.Context, cfg *corev1.ConfigMap) error { _, err := k.CoreV1().ConfigMaps(cfg.Namespace).Create(ctx, cfg, metav1.CreateOptions{}) return err diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 99a25094dd..fc1608c694 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -9,8 +9,8 @@ importers: .: dependencies: markdownlint-cli2: - specifier: ^0.14.0 - version: 0.14.0 + specifier: ^0.17.2 + version: 0.17.2 markdownlint-rule-enhanced-proper-names: specifier: ^0.0.1 version: 0.0.1 @@ 
-836,6 +836,9 @@ packages: '@types/json-schema@7.0.15': resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + '@types/katex@0.16.7': + resolution: {integrity: sha512-HMwFiRujE5PjrgwHQ25+bsLJgowjGjm5Z8FVSf0N6PwgJrwxH0QxzHYDcKsTfV3wva0vzrpqMTJS2jXPr5BMEQ==} + '@types/mdast@3.0.15': resolution: {integrity: sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==} @@ -1043,9 +1046,15 @@ packages: resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==} engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} + character-entities-legacy@3.0.0: + resolution: {integrity: sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==} + character-entities@2.0.2: resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==} + character-reference-invalid@2.0.1: + resolution: {integrity: sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==} + chokidar@3.6.0: resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} engines: {node: '>= 8.10.0'} @@ -1078,6 +1087,10 @@ packages: resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} engines: {node: '>= 10'} + commander@8.3.0: + resolution: {integrity: sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==} + engines: {node: '>= 12'} + concat-map@0.0.1: resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} @@ -1134,6 +1147,9 @@ packages: resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} engines: {node: '>=6'} + devlop@1.1.0: + resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==} + diff@5.2.0: resolution: {integrity: sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==} engines: {node: '>=0.3.1'} @@ -1377,6 +1393,12 @@ packages: resolution: {integrity: sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==} engines: {node: '>= 0.10'} + is-alphabetical@2.0.1: + resolution: {integrity: sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==} + + is-alphanumerical@2.0.1: + resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==} + is-arrayish@0.2.1: resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} @@ -1391,6 +1413,9 @@ packages: is-core-module@2.13.1: resolution: {integrity: sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==} + is-decimal@2.0.1: + resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==} + is-empty@1.2.0: resolution: {integrity: sha512-F2FnH/otLNJv0J6wc73A5Xo7oHLNnqplYqZhUu01tD54DIPvxIRSTSLkrUB/M0nHO4vo1O9PDfN4KoTxCzLh/w==} @@ -1410,6 +1435,9 @@ packages: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} + 
is-hexadecimal@2.0.1: + resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==} + is-number@7.0.0: resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} engines: {node: '>=0.12.0'} @@ -1470,6 +1498,10 @@ packages: jsonfile@6.1.0: resolution: {integrity: sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==} + katex@0.16.22: + resolution: {integrity: sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==} + hasBin: true + kind-of@6.0.3: resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==} engines: {node: '>=0.10.0'} @@ -1532,15 +1564,11 @@ packages: peerDependencies: markdownlint-cli2: '>=0.0.4' - markdownlint-cli2@0.14.0: - resolution: {integrity: sha512-2cqdWy56frU2FTpbuGb83mEWWYuUIYv6xS8RVEoUAuKNw/hXPar2UYGpuzUhlFMngE8Omaz4RBH52MzfRbGshw==} + markdownlint-cli2@0.17.2: + resolution: {integrity: sha512-XH06ZOi8wCrtOSSj3p8y3yJzwgzYOSa7lglNyS3fP05JPRzRGyjauBb5UvlLUSCGysMmULS1moxdRHHudV+g/Q==} engines: {node: '>=18'} hasBin: true - markdownlint-micromark@0.1.10: - resolution: {integrity: sha512-no5ZfdqAdWGxftCLlySHSgddEjyW4kui4z7amQcGsSKfYC5v/ou+8mIQVyg9KQMeEZLNtz9OPDTj7nnTnoR4FQ==} - engines: {node: '>=18'} - markdownlint-rule-enhanced-proper-names@0.0.1: resolution: {integrity: sha512-aL0X4UiZB1yF4sDfnD3eEePVB2XcjAyja0R1FMuq8siegJ+EK3KjQnyCB0M/eDTi4wDiJLVd92ecbQyzMXeFWA==} @@ -1551,8 +1579,8 @@ packages: markdownlint-rule-no-trailing-slash-in-links@0.0.1: resolution: {integrity: sha512-9n7g6kFSSjvPQR4HDxpCJQ6hQrxMO733WwROC0ab19yzL7mFfWvLFCcRUhwru+mq/Zg7FSkIAB3PWjWTH0uX5g==} - markdownlint@0.35.0: - resolution: {integrity: sha512-wgp8yesWjFBL7bycA3hxwHRdsZGJhjhyP1dSxKVKrza0EPFYtn+mHtkVy6dvP1kGSjovyG5B8yNP6Frj0UFUJg==} + markdownlint@0.37.4: + resolution: {integrity: sha512-u00joA/syf3VhWh6/ybVFkib5Zpj2e5KB/cfCei8fkSRuums6nyisTWGqjTWIOFoFwuXoTBQQiqlB4qFKp8ncQ==} engines: {node: '>=18'} mdast-util-from-markdown@1.3.1: @@ -1583,69 +1611,140 @@ packages: micromark-core-commonmark@1.1.0: resolution: {integrity: sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==} + micromark-core-commonmark@2.0.2: + resolution: {integrity: sha512-FKjQKbxd1cibWMM1P9N+H8TwlgGgSkWZMmfuVucLCHaYqeSvJ0hFeHsIa65pA2nYbes0f8LDHPMrd9X7Ujxg9w==} + + micromark-extension-directive@3.0.2: + resolution: {integrity: sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==} + + micromark-extension-gfm-autolink-literal@2.1.0: + resolution: {integrity: sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==} + + micromark-extension-gfm-footnote@2.1.0: + resolution: {integrity: sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==} + + micromark-extension-gfm-table@2.1.0: + resolution: {integrity: sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==} + + micromark-extension-math@3.1.0: + resolution: {integrity: sha512-lvEqd+fHjATVs+2v/8kg9i5Q0AP2k85H0WUOwpIVvUML8BapsMvh1XAogmQjOCsLpoKRCVQqEkQBB3NhVBcsOg==} + micromark-factory-destination@1.1.0: resolution: {integrity: sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==} + micromark-factory-destination@2.0.1: + resolution: {integrity: 
sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==} + micromark-factory-label@1.1.0: resolution: {integrity: sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==} + micromark-factory-label@2.0.1: + resolution: {integrity: sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==} + micromark-factory-space@1.1.0: resolution: {integrity: sha512-cRzEj7c0OL4Mw2v6nwzttyOZe8XY/Z8G0rzmWQZTBi/jjwyw/U4uqKtUORXQrR5bAZZnbTI/feRV/R7hc4jQYQ==} + micromark-factory-space@2.0.1: + resolution: {integrity: sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==} + micromark-factory-title@1.1.0: resolution: {integrity: sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==} + micromark-factory-title@2.0.1: + resolution: {integrity: sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==} + micromark-factory-whitespace@1.1.0: resolution: {integrity: sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==} + micromark-factory-whitespace@2.0.1: + resolution: {integrity: sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==} + micromark-util-character@1.2.0: resolution: {integrity: sha512-lXraTwcX3yH/vMDaFWCQJP1uIszLVebzUa3ZHdrgxr7KEU/9mL4mVgCpGbyhvNLNlauROiNUq7WN5u7ndbY6xg==} + micromark-util-character@2.1.1: + resolution: {integrity: sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==} + micromark-util-chunked@1.1.0: resolution: {integrity: sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==} + micromark-util-chunked@2.0.1: + resolution: {integrity: sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==} + micromark-util-classify-character@1.1.0: resolution: {integrity: sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==} + micromark-util-classify-character@2.0.1: + resolution: {integrity: sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==} + micromark-util-combine-extensions@1.1.0: resolution: {integrity: sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==} + micromark-util-combine-extensions@2.0.1: + resolution: {integrity: sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==} + micromark-util-decode-numeric-character-reference@1.1.0: resolution: {integrity: sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==} + micromark-util-decode-numeric-character-reference@2.0.2: + resolution: {integrity: sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==} + micromark-util-decode-string@1.1.0: resolution: {integrity: sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==} micromark-util-encode@1.1.0: resolution: {integrity: sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==} + micromark-util-encode@2.0.1: + resolution: {integrity: sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==} + micromark-util-html-tag-name@1.2.0: resolution: {integrity: 
sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==} + micromark-util-html-tag-name@2.0.1: + resolution: {integrity: sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==} + micromark-util-normalize-identifier@1.1.0: resolution: {integrity: sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==} + micromark-util-normalize-identifier@2.0.1: + resolution: {integrity: sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==} + micromark-util-resolve-all@1.1.0: resolution: {integrity: sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==} + micromark-util-resolve-all@2.0.1: + resolution: {integrity: sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==} + micromark-util-sanitize-uri@1.2.0: resolution: {integrity: sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==} + micromark-util-sanitize-uri@2.0.1: + resolution: {integrity: sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==} + micromark-util-subtokenize@1.1.0: resolution: {integrity: sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==} + micromark-util-subtokenize@2.1.0: + resolution: {integrity: sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==} + micromark-util-symbol@1.1.0: resolution: {integrity: sha512-uEjpEYY6KMs1g7QfJ2eX1SQEV+ZT4rUD3UcF6l57acZvLNK7PBZL+ty82Z1qhK1/yXIY4bdx04FKMgR0g4IAag==} + micromark-util-symbol@2.0.1: + resolution: {integrity: sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==} + micromark-util-types@1.1.0: resolution: {integrity: sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==} + micromark-util-types@2.0.1: + resolution: {integrity: sha512-534m2WhVTddrcKVepwmVEVnUAmtrx9bfIjNoQHRqfnvdaHQiFytEhJoTgpWJvDEXCO5gLTQh3wYC1PgOJA4NSQ==} + micromark@3.2.0: resolution: {integrity: sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==} - micromatch@4.0.7: - resolution: {integrity: sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==} - engines: {node: '>=8.6'} + micromark@4.0.1: + resolution: {integrity: sha512-eBPdkcoCNvYcxQOAKAlceo5SNdzZWfF+FcSupREAzdAh9rRmE239CEQAiTwIgblwnoM8zzj35sZ5ZwvSEOF6Kw==} micromatch@4.0.8: resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} @@ -1713,6 +1812,9 @@ packages: resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} engines: {node: '>=6'} + parse-entities@4.0.2: + resolution: {integrity: sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==} + parse-json@5.2.0: resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} engines: {node: '>=8'} @@ -3028,7 +3130,7 @@ snapshots: gray-matter: 4.0.3 js-yaml: 4.1.0 lodash: 4.17.21 - micromatch: 4.0.7 + micromatch: 4.0.8 resolve-pathname: 3.0.0 shelljs: 0.8.5 tslib: 2.6.3 @@ -3214,6 +3316,8 @@ snapshots: '@types/json-schema@7.0.15': {} + '@types/katex@0.16.7': {} + '@types/mdast@3.0.15': dependencies: '@types/unist': 
2.0.10 @@ -3439,8 +3543,12 @@ snapshots: chalk@5.3.0: {} + character-entities-legacy@3.0.0: {} + character-entities@2.0.2: {} + character-reference-invalid@2.0.1: {} + chokidar@3.6.0: dependencies: anymatch: 3.1.3 @@ -3473,6 +3581,8 @@ snapshots: commander@7.2.0: {} + commander@8.3.0: {} + concat-map@0.0.1: {} concat-stream@2.0.0: @@ -3533,6 +3643,10 @@ snapshots: dequal@2.0.3: {} + devlop@1.1.0: + dependencies: + dequal: 2.0.3 + diff@5.2.0: {} dir-glob@3.0.1: @@ -3761,6 +3875,13 @@ snapshots: interpret@1.4.0: {} + is-alphabetical@2.0.1: {} + + is-alphanumerical@2.0.1: + dependencies: + is-alphabetical: 2.0.1 + is-decimal: 2.0.1 + is-arrayish@0.2.1: {} is-binary-path@2.1.0: @@ -3773,6 +3894,8 @@ snapshots: dependencies: hasown: 2.0.2 + is-decimal@2.0.1: {} + is-empty@1.2.0: {} is-extendable@0.1.1: {} @@ -3785,6 +3908,8 @@ snapshots: dependencies: is-extglob: 2.1.1 + is-hexadecimal@2.0.1: {} + is-number@7.0.0: {} is-plain-obj@4.1.0: {} @@ -3834,6 +3959,10 @@ snapshots: optionalDependencies: graceful-fs: 4.2.11 + katex@0.16.22: + dependencies: + commander: 8.3.0 + kind-of@6.0.3: {} kleur@4.1.5: {} @@ -3884,20 +4013,20 @@ snapshots: punycode.js: 2.3.1 uc.micro: 2.1.0 - markdownlint-cli2-formatter-default@0.0.5(markdownlint-cli2@0.14.0): + markdownlint-cli2-formatter-default@0.0.5(markdownlint-cli2@0.17.2): dependencies: - markdownlint-cli2: 0.14.0 + markdownlint-cli2: 0.17.2 - markdownlint-cli2@0.14.0: + markdownlint-cli2@0.17.2: dependencies: globby: 14.0.2 js-yaml: 4.1.0 jsonc-parser: 3.3.1 - markdownlint: 0.35.0 - markdownlint-cli2-formatter-default: 0.0.5(markdownlint-cli2@0.14.0) + markdownlint: 0.37.4 + markdownlint-cli2-formatter-default: 0.0.5(markdownlint-cli2@0.17.2) micromatch: 4.0.8 - - markdownlint-micromark@0.1.10: {} + transitivePeerDependencies: + - supports-color markdownlint-rule-enhanced-proper-names@0.0.1: dependencies: @@ -3909,10 +4038,19 @@ snapshots: dependencies: markdownlint-rule-helpers: 0.17.2 - markdownlint@0.35.0: + markdownlint@0.37.4: dependencies: markdown-it: 14.1.0 - markdownlint-micromark: 0.1.10 + micromark: 4.0.1 + micromark-core-commonmark: 2.0.2 + micromark-extension-directive: 3.0.2 + micromark-extension-gfm-autolink-literal: 2.1.0 + micromark-extension-gfm-footnote: 2.1.0 + micromark-extension-gfm-table: 2.1.0 + micromark-extension-math: 3.1.0 + micromark-util-types: 2.0.1 + transitivePeerDependencies: + - supports-color mdast-util-from-markdown@1.3.1: dependencies: @@ -3978,12 +4116,83 @@ snapshots: micromark-util-types: 1.1.0 uvu: 0.5.6 + micromark-core-commonmark@2.0.2: + dependencies: + decode-named-character-reference: 1.0.2 + devlop: 1.1.0 + micromark-factory-destination: 2.0.1 + micromark-factory-label: 2.0.1 + micromark-factory-space: 2.0.1 + micromark-factory-title: 2.0.1 + micromark-factory-whitespace: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-classify-character: 2.0.1 + micromark-util-html-tag-name: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-subtokenize: 2.1.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + + micromark-extension-directive@3.0.2: + dependencies: + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-factory-whitespace: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + parse-entities: 4.0.2 + + micromark-extension-gfm-autolink-literal@2.1.0: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-sanitize-uri: 2.0.1 + 
micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + + micromark-extension-gfm-footnote@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-core-commonmark: 2.0.2 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + + micromark-extension-gfm-table@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + + micromark-extension-math@3.1.0: + dependencies: + '@types/katex': 0.16.7 + devlop: 1.1.0 + katex: 0.16.22 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + micromark-factory-destination@1.1.0: dependencies: micromark-util-character: 1.2.0 micromark-util-symbol: 1.1.0 micromark-util-types: 1.1.0 + micromark-factory-destination@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + micromark-factory-label@1.1.0: dependencies: micromark-util-character: 1.2.0 @@ -3991,11 +4200,23 @@ snapshots: micromark-util-types: 1.1.0 uvu: 0.5.6 + micromark-factory-label@2.0.1: + dependencies: + devlop: 1.1.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + micromark-factory-space@1.1.0: dependencies: micromark-util-character: 1.2.0 micromark-util-types: 1.1.0 + micromark-factory-space@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-types: 2.0.1 + micromark-factory-title@1.1.0: dependencies: micromark-factory-space: 1.1.0 @@ -4003,6 +4224,13 @@ snapshots: micromark-util-symbol: 1.1.0 micromark-util-types: 1.1.0 + micromark-factory-title@2.0.1: + dependencies: + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + micromark-factory-whitespace@1.1.0: dependencies: micromark-factory-space: 1.1.0 @@ -4010,30 +4238,61 @@ snapshots: micromark-util-symbol: 1.1.0 micromark-util-types: 1.1.0 + micromark-factory-whitespace@2.0.1: + dependencies: + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + micromark-util-character@1.2.0: dependencies: micromark-util-symbol: 1.1.0 micromark-util-types: 1.1.0 + micromark-util-character@2.1.1: + dependencies: + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + micromark-util-chunked@1.1.0: dependencies: micromark-util-symbol: 1.1.0 + micromark-util-chunked@2.0.1: + dependencies: + micromark-util-symbol: 2.0.1 + micromark-util-classify-character@1.1.0: dependencies: micromark-util-character: 1.2.0 micromark-util-symbol: 1.1.0 micromark-util-types: 1.1.0 + micromark-util-classify-character@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + micromark-util-combine-extensions@1.1.0: dependencies: micromark-util-chunked: 1.1.0 micromark-util-types: 1.1.0 + micromark-util-combine-extensions@2.0.1: + dependencies: + micromark-util-chunked: 2.0.1 + micromark-util-types: 2.0.1 + micromark-util-decode-numeric-character-reference@1.1.0: dependencies: micromark-util-symbol: 1.1.0 + micromark-util-decode-numeric-character-reference@2.0.2: + dependencies: + micromark-util-symbol: 2.0.1 + micromark-util-decode-string@1.1.0: dependencies: decode-named-character-reference: 1.0.2 
@@ -4043,22 +4302,40 @@ snapshots: micromark-util-encode@1.1.0: {} + micromark-util-encode@2.0.1: {} + micromark-util-html-tag-name@1.2.0: {} + micromark-util-html-tag-name@2.0.1: {} + micromark-util-normalize-identifier@1.1.0: dependencies: micromark-util-symbol: 1.1.0 + micromark-util-normalize-identifier@2.0.1: + dependencies: + micromark-util-symbol: 2.0.1 + micromark-util-resolve-all@1.1.0: dependencies: micromark-util-types: 1.1.0 + micromark-util-resolve-all@2.0.1: + dependencies: + micromark-util-types: 2.0.1 + micromark-util-sanitize-uri@1.2.0: dependencies: micromark-util-character: 1.2.0 micromark-util-encode: 1.1.0 micromark-util-symbol: 1.1.0 + micromark-util-sanitize-uri@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-encode: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-subtokenize@1.1.0: dependencies: micromark-util-chunked: 1.1.0 @@ -4066,10 +4343,21 @@ snapshots: micromark-util-types: 1.1.0 uvu: 0.5.6 + micromark-util-subtokenize@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-util-chunked: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + micromark-util-symbol@1.1.0: {} + micromark-util-symbol@2.0.1: {} + micromark-util-types@1.1.0: {} + micromark-util-types@2.0.1: {} + micromark@3.2.0: dependencies: '@types/debug': 4.1.12 @@ -4092,10 +4380,27 @@ snapshots: transitivePeerDependencies: - supports-color - micromatch@4.0.7: + micromark@4.0.1: dependencies: - braces: 3.0.3 - picomatch: 2.3.1 + '@types/debug': 4.1.12 + debug: 4.3.5 + decode-named-character-reference: 1.0.2 + devlop: 1.1.0 + micromark-core-commonmark: 2.0.2 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-combine-extensions: 2.0.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-encode: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-subtokenize: 2.1.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.1 + transitivePeerDependencies: + - supports-color micromatch@4.0.8: dependencies: @@ -4152,6 +4457,16 @@ snapshots: dependencies: callsites: 3.1.0 + parse-entities@4.0.2: + dependencies: + '@types/unist': 2.0.10 + character-entities-legacy: 3.0.0 + character-reference-invalid: 2.0.1 + decode-named-character-reference: 1.0.2 + is-alphanumerical: 2.0.1 + is-decimal: 2.0.1 + is-hexadecimal: 2.0.1 + parse-json@5.2.0: dependencies: '@babel/code-frame': 7.24.7