Skip to content

Unable to get session bus connection: Cannot spawn a message bus without a machine-id #3489

Open
@cloudcafetech

Description

@cloudcafetech

Environment: CentOS 9, RKE2, Cilium, HCO (HyperConverged Cluster Operator), and the Whereabouts IPAM CNI plugin

  • pod & vmi
k get po,vmi
NAME                             READY   STATUS    RESTARTS   AGE
pod/virt-launcher-testvm-75rx4   3/3     Running   0          21s

NAME                                        AGE   PHASE       IP    NODENAME       READY
virtualmachineinstance.kubevirt.io/testvm   21s   Scheduled         lenevo-ts-w2   False
  • logs
k logs -f pod/virt-launcher-testvm-75rx4
{"component":"virt-launcher","level":"info","msg":"Collected all requested hook sidecar sockets","pos":"manager.go:88","timestamp":"2025-04-20T03:00:26.899707Z"}
{"component":"virt-launcher","level":"info","msg":"Sorted all collected sidecar sockets per hook point based on their priority and name: map[]","pos":"manager.go:91","timestamp":"2025-04-20T03:00:26.899752Z"}
{"component":"virt-launcher","level":"info","msg":"Connecting to libvirt daemon: qemu+unix:///session?socket=/var/run/libvirt/virtqemud-sock","pos":"libvirt.go:547","timestamp":"2025-04-20T03:00:26.900007Z"}
{"component":"virt-launcher","level":"info","msg":"Connecting to libvirt daemon failed: virError(Code=38, Domain=7, Message='Failed to connect socket to '/var/run/libvirt/virtqemud-sock': No such file or directory')","pos":"libvirt.go:555","timestamp":"2025-04-20T03:00:26.900341Z"}
{"component":"virt-launcher","level":"info","msg":"libvirt version: 10.10.0, package: 4.el9 ([email protected], 2025-01-16-13:06:37, )","subcomponent":"libvirt","thread":"41","timestamp":"2025-04-20T03:00:26.914000Z"}
{"component":"virt-launcher","level":"info","msg":"hostname: testvm","subcomponent":"libvirt","thread":"41","timestamp":"2025-04-20T03:00:26.914000Z"}
{"component":"virt-launcher","level":"error","msg":"internal error: Unable to get session bus connection: Cannot spawn a message bus without a machine-id: Unable to load /var/lib/dbus/machine-id or /etc/machine-id: Failed to open file “/var/lib/dbus/machine-id”: No such file or directory","pos":"virGDBusGetSessionBus:126","subcomponent":"libvirt","thread":"41","timestamp":"2025-04-20T03:00:26.914000Z"}
{"component":"virt-launcher","level":"error","msg":"internal error: Unable to get system bus connection: Could not connect: No such file or directory","pos":"virGDBusGetSystemBus:99","subcomponent":"libvirt","thread":"41","timestamp":"2025-04-20T03:00:26.914000Z"}
{"component":"virt-launcher","level":"info","msg":"Connected to libvirt daemon","pos":"libvirt.go:563","timestamp":"2025-04-20T03:00:27.405221Z"}
{"component":"virt-launcher","level":"info","msg":"Registered libvirt event notify callback","pos":"client.go:573","timestamp":"2025-04-20T03:00:27.413239Z"}
{"component":"virt-launcher","level":"info","msg":"Marked as ready","pos":"virt-launcher.go:76","timestamp":"2025-04-20T03:00:27.414591Z"}
  • VM yaml
k get vm testvm  -oyaml

apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  annotations:
    kubevirt.io/latest-observed-api-version: v1
    kubevirt.io/storage-observed-api-version: v1
  creationTimestamp: "2025-04-20T03:00:18Z"
  finalizers:
  - kubevirt.io/virtualMachineControllerFinalize
  generation: 1
  name: testvm
  namespace: virtualmachines
  resourceVersion: "28019"
  uid: f23ce375-f76d-4393-8667-80e1106752ca
spec:
  runStrategy: Always
  template:
    metadata:
      annotations:
        kubevirt.io/allow-pod-bridge-network-live-migration: "true"
      creationTimestamp: null
      labels:
        kubevirt.io/domain: testvm
        kubevirt.io/size: small
    spec:
      architecture: amd64
      domain:
        devices:
          disks:
          - disk:
              bus: virtio
            name: containerdisk
          - disk:
              bus: virtio
            name: cloudinitdisk
          interfaces:
          - bridge: {}
            macAddress: 02:b8:76:00:00:02
            name: external
        machine:
          type: q35
        resources:
          requests:
            memory: 64M
      networks:
      - multus:
          networkName: virtualmachines/static-132
        name: external
      volumes:
      - containerDisk:
          image: quay.io/kubevirt/cirros-container-disk-demo
        name: containerdisk
      - cloudInitNoCloud:
          networkData: |-
            version: 1
            config:
            - type: physical
              name: eth0
              subnets:
                - type: dhcp
          userDataBase64: SGkuXG4=
        name: cloudinitdisk
status:
  conditions:
  - lastProbeTime: "2025-04-20T03:00:19Z"
    lastTransitionTime: "2025-04-20T03:00:19Z"
    message: Guest VM is not reported as running
    reason: GuestNotRunning
    status: "False"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: null
    status: "True"
    type: LiveMigratable
  - lastProbeTime: null
    lastTransitionTime: null
    status: "True"
    type: StorageLiveMigratable
  - lastProbeTime: null
    lastTransitionTime: "2025-04-20T03:00:32Z"
    message: 'failed to configure vmi network: setup failed, err: pod link (pod3c4623849a4)
      is missing'
    reason: Synchronizing with the Domain failed.
    status: "False"
    type: Synchronized
  created: true
  desiredGeneration: 1
  observedGeneration: 1
  printableStatus: Starting
  runStrategy: Always
  volumeSnapshotStatuses:
  - enabled: false
    name: containerdisk
    reason: Snapshot is not supported for this volumeSource type [containerdisk]
  - enabled: false
    name: cloudinitdisk
    reason: Snapshot is not supported for this volumeSource type [cloudinitdisk]
  • Node
[root@lenevo-ts-w2 ~]# ls -l /var/lib/dbus/machine-id
lrwxrwxrwx. 1 root root 15 Mar 31 07:43 /var/lib/dbus/machine-id -> /etc/machine-id
[root@lenevo-ts-w2 ~]# more /etc/machine-id
bf8f638467cf680fdc9dad306803d692
[root@lenevo-ts-w2 ~]# more /var/lib/dbus/machine-id
bf8f638467cf680fdc9dad306803d692
[root@lenevo-ts-w2 ~]#
# Reproduction steps: deploy the HyperConverged Cluster Operator (HCO) on a
# vanilla (non-OpenShift) cluster.
HCONS=kubevirt-hyperconverged
readonly HCONS

# Create the namespaces HCO expects and relax Pod Security admission so the
# privileged virt components are allowed to run.
for ns in "$HCONS" openshift konveyor-forklift virtualmachines olm; do
  kubectl create ns "$ns"
done
for ns in "$HCONS" openshift konveyor-forklift virtualmachines olm; do
  kubectl label ns "$ns" pod-security.kubernetes.io/enforce=privileged
done

# Skip components that need OpenShift-only machinery (SSP, CLI download).
# Use an array so the flag and its selector survive quoting intact instead of
# relying on unquoted word-splitting of a flat string (ShellCheck SC2086).
LABEL_SELECTOR_ARG=(-l 'name!=ssp-operator,name!=hyperconverged-cluster-cli-download')
readonly BASE_URL=https://raw.githubusercontent.com/kubevirt/hyperconverged-cluster-operator/main/deploy

# Install the CRDs the operator and its operands require.
for crd in \
    cluster-network-addons00 \
    containerized-data-importer00 \
    hco00 \
    kubevirt00 \
    hostpath-provisioner00 \
    scheduling-scale-performance00 \
    application-aware-quota00; do
  kubectl apply "${LABEL_SELECTOR_ARG[@]}" -f "${BASE_URL}/crds/${crd}.crd.yaml"
done

# cert-manager is a prerequisite (serves the webhook certificates); wait until
# it is actually available before installing anything that depends on it.
kubectl apply "${LABEL_SELECTOR_ARG[@]}" -f "${BASE_URL}/cert-manager.yaml"
kubectl -n cert-manager wait deployment/cert-manager --for=condition=Available --timeout="300s"
kubectl -n cert-manager wait deployment/cert-manager-webhook --for=condition=Available --timeout="300s"

# RBAC, service account, webhooks, and the operator deployment itself.
for manifest in cluster_role service_account cluster_role_binding webhooks operator; do
  kubectl apply "${LABEL_SELECTOR_ARG[@]}" -n "$HCONS" -f "${BASE_URL}/${manifest}.yaml"
done

kubectl -n "$HCONS" wait deployment/hyperconverged-cluster-webhook --for=condition=Available --timeout="300s"

# Finally, create the HyperConverged custom resource to trigger the rollout.
wget "${BASE_URL}/hco.cr.yaml"
kubectl apply "${LABEL_SELECTOR_ARG[@]}" -n "$HCONS" -f hco.cr.yaml

Metadata

Metadata

Assignees

No one assigned

    Labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions