StatefulSet Kubernetes: volumeMounts[0].name: Not found


Raphael Stonehorse

Nov 7, 2023, 11:54:30 AM
to kubernetes-sig-storage
In https://kubernetes.io/docs/tutorials/stateful-application/cassandra/ we read in the `/application/cassandra/cassandra-statefulset.yaml` sample:

    # These volume mounts are persistent. They are like inline claims,
    # but not exactly because the names need to match exactly one of
    # the stateful pod volumes.

So... I deployed an `nfs-subdir-external-provisioner` following these indications: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/blob/master/charts/nfs-subdir-external-provisioner/README.md#install-multiple-provisioners , specifying the `volume name used inside the pod` through `nfs.volumeName` :



    root@k8s-eu-1-master:~# helm install k8s-eu-1-worker-1-nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
    > --set nfs.server=38.242.249.121 \
    > --set nfs.path=/srv/shared-k8s-eu-1-worker-1 \
    > --set storageClass.name=k8s-eu-1-worker-1 \
    > --set storageClass.provisionerName=k8s-sigs.io/k8s-eu-1-worker-1 \
    > --set nfs.volumeName=k8s-eu-1-worker-1-nfs-v
    NAME: k8s-eu-1-worker-1-nfs-subdir-external-provisioner
    LAST DEPLOYED: Tue Nov  7 17:14:42 2023
    NAMESPACE: default
    STATUS: deployed
    REVISION: 1
    TEST SUITE: None
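
(The `nfs.volumeName` value becomes the name of the NFS volume inside the provisioner's own Deployment pod, as the `kubectl describe pod` output below confirms; a quick way to check it is something like:)

    # should print the volume name, i.e. k8s-eu-1-worker-1-nfs-v
    kubectl get deployment k8s-eu-1-worker-1-nfs-subdir-external-provisioner \
      -o jsonpath='{.spec.template.spec.volumes[*].name}'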

    root@k8s-eu-1-master:~# helm ls
    NAME                                             NAMESPACE REVISION UPDATED                                 STATUS   CHART                                 APP VERSION
    k8s-eu-1-worker-1-nfs-subdir-external-provisioner default   1       2023-11-07 17:14:42.197847444 +0100 CET deployed nfs-subdir-external-provisioner-4.0.18 4.0.2      

    root@k8s-eu-1-master:~# kubectl get deployments
    NAME                                                READY   UP-TO-DATE   AVAILABLE   AGE
    k8s-eu-1-worker-1-nfs-subdir-external-provisioner   1/1     1            1           2m9s

    root@k8s-eu-1-master:~# kubectl get pods
    NAME                                                              READY   STATUS    RESTARTS   AGE
    k8s-eu-1-worker-1-nfs-subdir-external-provisioner-79fff4ff2qx7k   1/1     Running   0          2m27s

Output of `kubectl describe pod`:

    root@k8s-eu-1-master:~# kubectl describe pod k8s-eu-1-worker-1-nfs-subdir-external-provisioner-79fff4ff2qx7k
    Name:             k8s-eu-1-worker-1-nfs-subdir-external-provisioner-79fff4ff2qx7k
    Namespace:        default
    Priority:         0
    Service Account:  k8s-eu-1-worker-1-nfs-subdir-external-provisioner
    Node:             k8s-eu-1-worker-2/yy.yyy.yyy.yyy
    Start Time:       Tue, 07 Nov 2023 17:14:42 +0100
    Labels:           app=nfs-subdir-external-provisioner
                      pod-template-hash=79fff4ff6
                      release=k8s-eu-1-worker-1-nfs-subdir-external-provisioner
    Annotations:      cni.projectcalico.org/containerID: 2c7d048ecf0861c60a471e93e41d20dca0c7c58c20a3369ed1463820e898d1a7
                      cni.projectcalico.org/podIP: 192.168.236.18/32
                      cni.projectcalico.org/podIPs: 192.168.236.18/32
    Status:           Running
    IP:               192.168.236.18
    IPs:
      IP:           192.168.236.18
    Controlled By:  ReplicaSet/k8s-eu-1-worker-1-nfs-subdir-external-provisioner-79fff4ff6
    Containers:
      nfs-subdir-external-provisioner:
        Container ID:   containerd://c4afd4f56bdb2d69aa2be23d6d47e843ceaa1f823459c7cffbf5dc859f59e44b
        Image:          registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
        Image ID:       registry.k8s.io/sig-storage/nfs-subdir-external-provisioner@sha256:63d5e04551ec8b5aae83b6f35938ca5ddc50a88d85492d9731810c31591fa4c9
        Port:           <none>
        Host Port:      <none>
        State:          Running
          Started:      Tue, 07 Nov 2023 17:14:43 +0100
        Ready:          True
        Restart Count:  0
        Environment:
          PROVISIONER_NAME:  k8s-sigs.io/k8s-eu-1-worker-1
          NFS_SERVER:        xx.xxx.xxx.xxx
          NFS_PATH:          /srv/shared-k8s-eu-1-worker-1
        Mounts:
          /persistentvolumes from k8s-eu-1-worker-1-nfs-v (rw)
          /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-knxw8 (ro)
    Conditions:
      Type              Status
      Initialized       True
      Ready             True
      ContainersReady   True
      PodScheduled      True
    Volumes:
      k8s-eu-1-worker-1-nfs-v:   # <--- the volume named via nfs.volumeName
        Type:      NFS (an NFS mount that lasts the lifetime of a pod)
        Server:   xx.xxx.xxx.xxx
        Path:      /srv/shared-k8s-eu-1-worker-1
        ReadOnly:  false
      kube-api-access-knxw8:
        Type:                    Projected (a volume that contains injected data from multiple sources)
        TokenExpirationSeconds:  3607
        ConfigMapName:           kube-root-ca.crt
        ConfigMapOptional:       <nil>
        DownwardAPI:             true
    QoS Class:                   BestEffort
    Node-Selectors:              <none>
    Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
    Events:
      Type    Reason     Age    From               Message
      ----    ------     ----   ----               -------
      Normal  Scheduled  2m49s  default-scheduler  Successfully assigned default/k8s-eu-1-worker-1-nfs-subdir-external-provisioner-79fff4ff2qx7k to k8s-eu-1-worker-2
      Normal  Pulled     2m49s  kubelet            Container image "registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2" already present on machine
      Normal  Created    2m49s  kubelet            Created container nfs-subdir-external-provisioner
      Normal  Started    2m49s  kubelet            Started container nfs-subdir-external-provisioner


In `cassandra-statefulset.yaml` I've set the `volumeMounts` name to the pod volume name `k8s-eu-1-worker-1-nfs-v`:

        volumeMounts:
        - name: k8s-eu-1-worker-1-nfs-v
          mountPath: /srv/shared-k8s-eu-1-worker-1

This is the entire `cassandra-statefulset.yaml` :

    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: cassandra
      labels:
        app: cassandra
    spec:
      serviceName: cassandra
      replicas: 3
      selector:
        matchLabels:
          app: cassandra
      template:
        metadata:
          labels:
            app: cassandra
        spec:
          terminationGracePeriodSeconds: 1800
          containers:
          - name: cassandra
            image: gcr.io/google-samples/cassandra:v13
            imagePullPolicy: Always
            ports:
            - containerPort: 7000
              name: intra-node
            - containerPort: 7001
              name: tls-intra-node
            - containerPort: 7199
              name: jmx
            - containerPort: 9042
              name: cql
            resources:
              limits:
                cpu: "500m"
                memory: 1Gi
              requests:
                cpu: "500m"
                memory: 1Gi
            securityContext:
              capabilities:
                add:
                  - IPC_LOCK
            lifecycle:
              preStop:
                exec:
                  command:
                  - /bin/sh
                  - -c
                  - nodetool drain
            env:
              - name: MAX_HEAP_SIZE
                value: 512M
              - name: HEAP_NEWSIZE
                value: 100M
              - name: CASSANDRA_SEEDS
                value: "cassandra-0.cassandra.default.svc.cluster.local"
              - name: CASSANDRA_CLUSTER_NAME
                value: "K8Demo"
              - name: CASSANDRA_DC
                value: "DC1-K8Demo"
              - name: CASSANDRA_RACK
                value: "Rack1-K8Demo"
              - name: POD_IP
                valueFrom:
                  fieldRef:
                    fieldPath: status.podIP
            readinessProbe:
              exec:
                command:
                - /bin/bash
                - -c
                - /ready-probe.sh
              initialDelaySeconds: 15
              timeoutSeconds: 5
            # These volume mounts are persistent. They are like inline claims,
            # but not exactly because the names need to match exactly one of
            # the stateful pod volumes.
            volumeMounts:
            - name: k8s-eu-1-worker-1-nfs-v
              mountPath: /srv/shared-k8s-eu-1-worker-1
   
      # These are converted to volume claims by the controller
      # and mounted at the paths mentioned above.
      # do not use these in production until ssd GCEPersistentDisk or other ssd pd
      volumeClaimTemplates:
      - metadata:
          name: k8s-eu-1-worker-1
        spec:
          accessModes: [ "ReadWriteOnce" ]
          storageClassName: k8s-eu-1-worker-1
          resources:
            requests:
              storage: 1Gi
    ---
    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: k8s-eu-1-worker-1
    provisioner: k8s-sigs.io/k8s-eu-1-worker-1
    parameters:
      #type: pd-ss


When I apply this configuration I get `spec.containers[0].volumeMounts[0].name: Not found: "k8s-eu-1-worker-1-nfs-v"`:

    root@k8s-eu-1-master:~# kubectl apply -f ./cassandraStatefulApp/cassandra-statefulset.yaml
    statefulset.apps/cassandra created
    Warning: resource storageclasses/k8s-eu-1-worker-1 is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
    The StorageClass "k8s-eu-1-worker-1" is invalid: parameters: Forbidden: updates to parameters are forbidden.
   
    root@k8s-eu-1-master:~# kubectl get statefulsets
    NAME        READY   AGE
    cassandra   0/3     21s
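
(Side note: the `parameters: Forbidden` error above is unrelated to the mount problem; StorageClass `parameters` are immutable once created, so re-applying a modified StorageClass means deleting and recreating it, e.g.:)

    kubectl delete storageclass k8s-eu-1-worker-1
    kubectl apply -f ./cassandraStatefulApp/cassandra-statefulset.yaml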

   
    root@k8s-eu-1-master:~# kubectl describe statefulsets cassandra
    Name:               cassandra
    Namespace:          default
    CreationTimestamp:  Tue, 07 Nov 2023 17:33:40 +0100
    Selector:           app=cassandra
    Labels:             app=cassandra
    Annotations:        <none>
    Replicas:           3 desired | 0 total
    Update Strategy:    RollingUpdate
      Partition:        0
    Pods Status:        0 Running / 0 Waiting / 0 Succeeded / 0 Failed
    Pod Template:
      Labels:  app=cassandra
      Containers:
       cassandra:
        Image:       gcr.io/google-samples/cassandra:v13
        Ports:       7000/TCP, 7001/TCP, 7199/TCP, 9042/TCP
        Host Ports:  0/TCP, 0/TCP, 0/TCP, 0/TCP
        Limits:
          cpu:     500m
          memory:  1Gi
        Requests:
          cpu:      500m
          memory:   1Gi
        Readiness:  exec [/bin/bash -c /ready-probe.sh] delay=15s timeout=5s period=10s #success=1 #failure=3
        Environment:
          MAX_HEAP_SIZE:           512M
          HEAP_NEWSIZE:            100M
          CASSANDRA_SEEDS:         cassandra-0.cassandra.default.svc.cluster.local
          CASSANDRA_CLUSTER_NAME:  K8Demo
          CASSANDRA_DC:            DC1-K8Demo
          CASSANDRA_RACK:          Rack1-K8Demo
          POD_IP:                   (v1:status.podIP)
        Mounts:
          /srv/shared-k8s-eu-1-worker-1 from k8s-eu-1-worker-1-nfs-v (rw)
      Volumes:  <none>
    Volume Claims:
      Name:          k8s-eu-1-worker-1
      StorageClass:  k8s-eu-1-worker-1
      Labels:        <none>
      Annotations:   <none>
      Capacity:      1Gi
      Access Modes:  [ReadWriteOnce]
    Events:
      Type     Reason        Age                 From                    Message
      ----     ------        ----                ----                    -------
      Warning  FailedCreate  20s (x13 over 41s)  statefulset-controller  create Pod cassandra-0 in StatefulSet cassandra failed error: Pod "cassandra-0" is invalid: spec.containers[0].volumeMounts[0].name: Not found: "k8s-eu-1-worker-1-nfs-v"


What am I doing wrong? What is the correct way to specify the `name` of the `volumeMounts` in the `statefulset.yaml` file?

Matthew Cary

Nov 7, 2023, 12:05:22 PM
to Raphael Stonehorse, kubernetes-sig-storage
The volumeMounts name needs to match the name in the volume claim template, I think:

            volumeMounts:
            - name: k8s-eu-1-worker-1-nfs-v   <<<<*****<<<<

              mountPath: /srv/shared-k8s-eu-1-worker-1
   
      # These are converted to volume claims by the controller
      # and mounted at the paths mentioned above.
      # do not use these in production until ssd GCEPersistentDisk or other ssd pd
      volumeClaimTemplates:
      - metadata:
          name: k8s-eu-1-worker-1  <<<<*****<<<<
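
More generally, a mount name has to resolve to either an entry in the pod template's `spec.volumes` or to one of the `volumeClaimTemplates`; since your pod template declares no volumes (`Volumes: <none>` in the describe output), the claim template name is the only one that works. A minimal sketch of the two fields that must line up, with the claim spec copied from your manifest:

    spec:
      template:
        spec:
          containers:
          - name: cassandra
            volumeMounts:
            - name: k8s-eu-1-worker-1        # must match the claim template name below
              mountPath: /srv/shared-k8s-eu-1-worker-1
      volumeClaimTemplates:
      - metadata:
          name: k8s-eu-1-worker-1            # the controller creates one PVC per pod from this
        spec:
          accessModes: [ "ReadWriteOnce" ]
          storageClassName: k8s-eu-1-worker-1
          resources:
            requests:
              storage: 1Gi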


Raphael Stonehorse

Nov 7, 2023, 12:21:41 PM
to kubernetes-sig-storage
Yes, you are right, Matt.

With

            volumeMounts:
            - name: k8s-eu-1-worker-1

              mountPath: /srv/shared-k8s-eu-1-worker-1

      # These are converted to volume claims by the controller
      # and mounted at the paths mentioned above.
      # do not use these in production until ssd GCEPersistentDisk or other ssd pd
      volumeClaimTemplates:
      - metadata:
          name: k8s-eu-1-worker-1


I get `create Pod cassandra-0 in StatefulSet cassandra successful`:

    root@k8s-eu-1-master:~# kubectl get statefulsets
    NAME        READY   AGE
    cassandra   0/3     14s

    root@k8s-eu-1-master:~# kubectl describe statefulsets cassandra
    Name:               cassandra
    Namespace:          default
    CreationTimestamp:  Tue, 07 Nov 2023 18:16:33 +0100

    Selector:           app=cassandra
    Labels:             app=cassandra
    Annotations:        <none>
    Replicas:           3 desired | 1 total

    Update Strategy:    RollingUpdate
      Partition:        0
    Pods Status:        1 Running / 0 Waiting / 0 Succeeded / 0 Failed

    Pod Template:
      Labels:  app=cassandra
      Containers:
       cassandra:
        Image:       gcr.io/google-samples/cassandra:v13
        Ports:       7000/TCP, 7001/TCP, 7199/TCP, 9042/TCP
        Host Ports:  0/TCP, 0/TCP, 0/TCP, 0/TCP
        Limits:
          cpu:     500m
          memory:  1Gi
        Requests:
          cpu:      500m
          memory:   1Gi
        Readiness:  exec [/bin/bash -c /ready-probe.sh] delay=15s timeout=5s period=10s #success=1 #failure=3
        Environment:
          MAX_HEAP_SIZE:           512M
          HEAP_NEWSIZE:            100M
          CASSANDRA_SEEDS:         cassandra-0.cassandra.default.svc.cluster.local
          CASSANDRA_CLUSTER_NAME:  K8Demo
          CASSANDRA_DC:            DC1-K8Demo
          CASSANDRA_RACK:          Rack1-K8Demo
          POD_IP:                   (v1:status.podIP)
        Mounts:
          /srv/shared-k8s-eu-1-worker-1 from k8s-eu-1-worker-1 (rw)

      Volumes:  <none>
    Volume Claims:
      Name:          k8s-eu-1-worker-1
      StorageClass:  k8s-eu-1-worker-1
      Labels:        <none>
      Annotations:   <none>
      Capacity:      1Gi
      Access Modes:  [ReadWriteOnce]
    Events:
      Type    Reason            Age   From                    Message
      ----    ------            ----  ----                    -------
      Normal  SuccessfulCreate  27s   statefulset-controller  create Pod cassandra-0 in StatefulSet cassandra successful
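
Each replica should now get its own claim, named `<claim-template>-<pod>` by the StatefulSet controller (so `k8s-eu-1-worker-1-cassandra-0` and so on) and bound to a PV from the NFS provisioner; a quick way to verify:

    kubectl get pvc   # one claim per replica, e.g. k8s-eu-1-worker-1-cassandra-0
    kubectl get pv    # the volumes dynamically provisioned by k8s-sigs.io/k8s-eu-1-worker-1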


Thank you very much Matt