Hello,
I am trying to add /dev/kvm to LXD containers. I changed my profile to include the following:
lxc profile device add <profile> kvm unix-char source=/dev/kvm
lxc profile device add <profile> vhost-net unix-char source=/dev/vhost-net
lxc profile device add <profile> vhost-vsock unix-char source=/dev/vhost-vsock
This works fine, but I need different permissions inside the container, so I also added the mode parameter (manually and also via the profile device add command).
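For example, setting the mode on the existing profile device looks roughly like this (using the same <profile> placeholder as above; shown only as an illustration):
lxc profile device set <profile> kvm mode=0666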
lxc config show <container> --expanded also shows that the mode is set:
devices:
kvm:
mode: "0666"
source: /dev/kvm
type: unix-char
but inside the container, it looks like this:
# ls -ahl /dev/kvm
crw-rw---- 1 root kvm 10, 232 Nov 3 15:12 /dev/kvm
Manually running chmod 666 /dev/kvm inside the container works:
# ls -ahl /dev/kvm
crw-rw-rw- 1 root kvm 10, 232 Nov 3 15:15 /dev/kvm
Am I misunderstanding the option, or am I missing something here?
Some server info:
driver: qemu | lxc
driver_version: 8.2.1 | 6.0.0
instance_types:
- virtual-machine
- container
firewall: nftables
kernel: Linux
kernel_architecture: x86_64
kernel_features:
idmapped_mounts: "true"
netnsid_getifaddrs: "true"
seccomp_listener: "true"
seccomp_listener_continue: "true"
uevent_injection: "true"
unpriv_fscaps: "true"
kernel_version: 6.8.0-48-generic
lxc_features:
cgroup2: "true"
core_scheduling: "true"
devpts_fd: "true"
idmapped_mounts_v2: "true"
mount_injection_file: "true"
network_gateway_device_route: "true"
network_ipvlan: "true"
network_l2proxy: "true"
network_phys_macvlan_mtu: "true"
network_veth_router: "true"
pidfd: "true"
seccomp_allow_deny_syntax: "true"
seccomp_notify: "true"
seccomp_proxy_send_notify_fd: "true"
os_name: Ubuntu
os_version: "24.04"
project: default
server: lxd
server_clustered: false
server_event_mode: full-mesh
server_pid: 2028
server_version: 5.21.2
server_lts: true
storage: zfs
storage_version: 2.2.2-0ubuntu9
storage_supported_drivers:
- name: powerflex
version: 1.16 (nvme-cli)
remote: true
- name: zfs
version: 2.2.2-0ubuntu9
remote: false
- name: btrfs
version: 5.16.2
remote: false
- name: ceph
version: 17.2.7
remote: true
- name: cephfs
version: 17.2.7
remote: true
- name: cephobject
version: 17.2.7
remote: true
- name: dir
version: "1"
remote: false
- name: lvm
version: 2.03.11(2) (2021-01-08) / 1.02.175 (2021-01-08) / 4.48.0
remote: false
I just performed the following steps:
$ snap refresh lxd --channel=5.21/stable
$ lxc launch ubuntu:24.04 c1
$ lxc profile create kvm
$ lxc profile device add kvm kvm unix-char source=/dev/kvm
$ lxc profile device add kvm vhost-net unix-char source=/dev/vhost-net
$ lxc profile device add kvm vhost-vsock unix-char source=/dev/vhost-vsock
$ lxc profile device set kvm kvm mode=0666
$ lxc profile apply c1 default,kvm
$ lxc shell c1
root@c1:~# ls -ahl /dev/kvm
crw-rw-rw- 1 root root 10, 232 Nov 8 16:28 /dev/kvm
Subsequently unsetting the mode also appears to work:
$ lxc profile device unset kvm kvm mode
$ lxc exec c1 -- ls -ahl /dev/kvm
crw-rw---- 1 root root 10, 232 Nov 8 16:38 /dev/kvm
Could you please post a minimal reproducer for this? Thank you.
Hello markylaing,
thanks for checking and testing. It seems to be a problem when the profile is already specified at launch:
root@host ~ # lxc profile show default
name: default
description: Default LXD profile
config: {}
devices:
network:
name: eth0
nictype: bridged
parent: lxdbr0
type: nic
root:
path: /
pool: default_legacy
type: disk
root@host ~ # lxc profile show kvm
name: kvm
description: ""
config: {}
devices:
kvm:
mode: "0666"
source: /dev/kvm
type: unix-char
vhost-net:
source: /dev/vhost-net
type: unix-char
vhost-vsock:
source: /dev/vhost-vsock
type: unix-char
root@host ~ # lxc launch --profile=default --profile kvm ubuntu:24.04 c1
Creating c1
Starting c1
root@host ~ # lxc config show c1 --expanded
architecture: x86_64
config:
image.architecture: amd64
image.description: ubuntu 24.04 LTS amd64 (release) (20241106)
image.label: release
image.os: ubuntu
image.release: noble
image.serial: "20241106"
image.type: squashfs
image.version: "24.04"
volatile.base_image: 602f1cb373c046923c69c17136eee708c6ea5e8b1d6b7618275ee0ec246b4fe5
volatile.cloud-init.instance-id: 7bd82c98-15fd-41e2-8344-464bd3f13812
volatile.idmap.base: "0"
volatile.idmap.current: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
volatile.idmap.next: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
volatile.last_state.idmap: '[]'
volatile.last_state.power: RUNNING
volatile.network.host_name: veth06d84b53
volatile.network.hwaddr: 00:16:3e:66:de:a0
volatile.uuid: cc0a6b7e-2b07-497d-80f5-3c33e49bd397
volatile.uuid.generation: cc0a6b7e-2b07-497d-80f5-3c33e49bd397
devices:
kvm:
mode: "0666"
source: /dev/kvm
type: unix-char
network:
name: eth0
nictype: bridged
parent: lxdbr0
type: nic
root:
path: /
pool: default_legacy
type: disk
vhost-net:
source: /dev/vhost-net
type: unix-char
vhost-vsock:
source: /dev/vhost-vsock
type: unix-char
ephemeral: false
profiles:
- default
- kvm
stateful: false
description: ""
root@host ~ # lxc shell c1
root@c1:~# ls -ahl /dev/kvm
crw-rw---- 1 root kvm 10, 232 Nov 8 17:22 /dev/kvm
If I apply the profile separately (after launch), it does indeed seem to work fine.
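To illustrate, a rough sketch of what I mean (same container and profile names as above, not the exact commands I ran):
lxc launch --profile=default ubuntu:24.04 c1
lxc profile add c1 kvm
After that, /dev/kvm seems to come up with the requested 0666 mode inside the container.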
Indeed, I was able to reproduce the problem as you described. Also, following the steps shown by @markylaing and then restarting the instance results in a similar problem:
➜ ~ lxc profile device set kvm kvm mode=0666
➜ ~ lxc exec tc1 -- ls -ahl /dev/kvm
crw-rw-rw- 1 root root 10, 232 Nov 11 03:16 /dev/kvm
➜ ~ lxc restart tc1
➜ ~ lxc exec tc1 -- ls -ahl /dev/kvm
crw-rw---- 1 root kvm 10, 232 Nov 11 03:30 /dev/kvm
All of this was using latest/edge. So this is definitely a bug; I opened an issue so we can keep track of it. Thanks for the report.
Permissions and ownership change at system startup because udev includes predefined rules. You should be able to examine these rules in a container by running cat /usr/lib/udev/rules.d/50-udev-default.rules. There should be no need to change the permissions of /dev/kvm when it is used correctly. As of Ubuntu 18.04, installing qemu-kvm is required: sudo apt install qemu-kvm. If permission denied is still observed, the user is likely not in the kvm group; running sudo adduser $USER kvm fixes this. If changing the permissions is required for your purposes, you can set up local udev rules, or add the device by specifying a different path inside the container.
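If you go the udev route, a minimal local override could look something like this (the file name is just an illustration; the static_node option is there so the rule also applies to the statically created node at startup):
# /etc/udev/rules.d/99-kvm-local.rules
KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"
A container restart (or reloading the rules with udevadm control --reload) should then pick it up. Alternatively, unix-char devices accept a path option, so something like lxc profile device set kvm kvm path=/dev/kvm-custom (the target path here is made up) exposes the node at a location that the default rule for /dev/kvm should leave alone.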