diff --git a/defconfigs/blktests_scsi b/defconfigs/blktests_scsi index d49e72893..28462939e 100644 --- a/defconfigs/blktests_scsi +++ b/defconfigs/blktests_scsi @@ -9,7 +9,7 @@ CONFIG_GUESTFS_BRINGUP_DEBUG_1=y CONFIG_BOOTLINUX=y CONFIG_BOOTLINUX_9P=y -CONFIG_LIBVIRT_EXTRA_STORAGE_DRIVE_NVME=y +CONFIG_LIBVIRT_EXTRA_STORAGE_DRIVE_SCSI=y CONFIG_WORKFLOWS_TESTS=y CONFIG_WORKFLOWS_LINUX_TESTS=y diff --git a/kconfigs/Kconfig.libvirt b/kconfigs/Kconfig.libvirt index 8654eb9b1..f737b79fa 100644 --- a/kconfigs/Kconfig.libvirt +++ b/kconfigs/Kconfig.libvirt @@ -487,7 +487,7 @@ config LIBVIRT_MACHINE_TYPE_DEFAULT is the old i440x, so you will not get PCIe support. We only want to support PCI-E capable guests with libguest so the - deafult is not allowed on libguest. + default is not allowed on libguest. config LIBVIRT_MACHINE_TYPE_Q35 bool "q35" @@ -548,11 +548,18 @@ config LIBVIRT_EXTRA_STORAGE_DRIVE_VIRTIO you won't be able to test ZNS. config LIBVIRT_EXTRA_STORAGE_DRIVE_IDE - bool "ide" - output yaml - help - Use the QEMU ide driver for extra storage drives. This is useful for - really old Linux distributions that lack the virtio backend driver. + bool "ide" + output yaml + help + Use the QEMU ide driver for extra storage drives. This is useful for + really old Linux distributions that lack the virtio backend driver. + +config LIBVIRT_EXTRA_STORAGE_DRIVE_SCSI + bool "scsi" + output yaml + help + Use the QEMU SCSI driver for extra storage drives. This relies on a + virtio-scsi controller with scsi-hd devices attached. endchoice @@ -817,7 +824,7 @@ config LIBVIRT_AIO_MODE_NATIVE help Use the aio=native mode. For some older kernels it is known that native will cause corruption if used on ext4 or xfs filesystem if - you also use cace=none. This corruption is documented for RHEL: + you also use cache=none. 
This corruption is documented for RHEL: https://access.redhat.com/articles/41313 https://bugzilla.redhat.com/show_bug.cgi?id=615309 @@ -1104,7 +1111,7 @@ config LIBVIRT_ENABLE_GDB output yaml help Select this option if you want to enable debugging support for GDB. - By default , it is assumed that gdb is disabled since we dont want + By default, it is assumed that gdb is disabled since we don't want to complicate this for the CI runs. If enabled then libvirt guest xml for each guest will be configured to use gdb on a specific tcp port. @@ -1116,7 +1123,7 @@ config LIBVIRT_GDB_BASEPORT depends on LIBVIRT_ENABLE_GDB help This option defines the base port to be used for the GDB. - Esentially we need to make QEMU listen for an incoming connection from + Essentially we need to make QEMU listen for an incoming connection from gdb on a TCP port. The default port is chosen to be 1234. However we introduce variability for assigning the port to each guest by defining a base port and adding an index to it based on the number of libvrt guest diff --git a/playbooks/roles/gen_nodes/defaults/main.yml b/playbooks/roles/gen_nodes/defaults/main.yml index 6a899be45..2cf450b84 100644 --- a/playbooks/roles/gen_nodes/defaults/main.yml +++ b/playbooks/roles/gen_nodes/defaults/main.yml @@ -70,6 +70,7 @@ libvirt_extra_drive_id_prefix: 'drv' libvirt_extra_storage_drive_nvme: False libvirt_extra_storage_drive_virtio: False libvirt_extra_storage_drive_ide: False +libvirt_extra_storage_drive_scsi: False libvirt_extra_storage_aio_mode: "native" libvirt_extra_storage_aio_cache_mode: "none" # Note that NVMe on qemu does not allow the physical block size diff --git a/playbooks/roles/gen_nodes/templates/Vagrantfile.j2 b/playbooks/roles/gen_nodes/templates/Vagrantfile.j2 index 2298c3455..a52f55662 100644 --- a/playbooks/roles/gen_nodes/templates/Vagrantfile.j2 +++ b/playbooks/roles/gen_nodes/templates/Vagrantfile.j2 @@ -244,7 +244,7 @@ Vagrant.configure("2") do |config| # # Just create a PCI or 
PCI-E root bus dedicated for extra drives. We # use 0x08 to place this PCI / PCI-E root bus as we know this is - # avilable on modern x86-64 systems. Eventually we may need to bump + # available on modern x86-64 systems. Eventually we may need to bump # this to 0x9, but it would be better instead to have vagant-libvirt # speak "add a new PCI or PCI-E root bus" and "add extra drives" whether # that is nvme or virtio. @@ -252,7 +252,7 @@ Vagrant.configure("2") do |config| {% if not libvirt_override_machine_type %} # For i440x on x86_64 (default on libvirt as of today) we use PCI, not # PCI-E. Below assumes i440x. i440x cannot support CXL as it does not - # suport PCI-E. + # support PCI-E. libvirt.qemuargs :value => "-device" libvirt.qemuargs :value => "pci-bridge,id=custom-pci-for-{{ extra_disk_driver }},chassis_nr=1,bus=pci.0,addr=0x8" {% else %} @@ -417,6 +417,13 @@ Vagrant.configure("2") do |config| libvirt.qemuargs :value => "file=#{extra_disk},format={{ libvirt_extra_drive_format }},if=none,aio={{ libvirt_extra_storage_aio_mode }},cache={{ libvirt_extra_storage_aio_cache_mode }},id=#{disk_id}" libvirt.qemuargs :value => "-device" libvirt.qemuargs :value => "virtio-blk-pci,drive=#{disk_id},id=virtio-#{disk_id},serial=#{serial_id},bus=#{bus_for_extra_drives},addr=#{pci_function},iothread=kdevops-virtio-iothread-#{port}#{extra_drive_largio_args},logical_block_size=#{virtio_lbs},physical_block_size=#{virtio_pbs}" +{% elif libvirt_extra_storage_drive_scsi %} + libvirt.qemuargs :value => "-device" + libvirt.qemuargs :value => "virtio-scsi-pci,id=scsi#{port},bus=#{bus_for_extra_drives},addr=#{pci_function}" + libvirt.qemuargs :value => "-drive" + libvirt.qemuargs :value => "file=#{extra_disk},format={{ libvirt_extra_drive_format }},if=none,aio={{ libvirt_extra_storage_aio_mode }},cache={{ libvirt_extra_storage_aio_cache_mode }},id=#{disk_id}" + libvirt.qemuargs :value => "-device" + libvirt.qemuargs :value => "scsi-hd,drive=#{disk_id},bus=scsi#{port}.0" {% elif 
libvirt_extra_storage_drive_nvme %} if ! largio nvme_lbs = "{{ libvirt_extra_storage_nvme_logical_block_size }}" diff --git a/playbooks/roles/gen_nodes/templates/drives.j2 b/playbooks/roles/gen_nodes/templates/drives.j2 index 8327cd430..1241b88a9 100644 --- a/playbooks/roles/gen_nodes/templates/drives.j2 +++ b/playbooks/roles/gen_nodes/templates/drives.j2 @@ -40,6 +40,26 @@ the drives can vary by type, so we have one macro by type of drive. {% endfor %} {%- endmacro -%} +{%- macro gen_drive_scsi(num_drives, + kdevops_storage_pool_path, + hostname, + libvirt_extra_drive_format, + libvirt_extra_storage_aio_mode, + libvirt_extra_storage_aio_cache_mode) -%} + +{% for n in range(0,num_drives) %} + + + + + + + + + +{% endfor %} + +{%- endmacro -%} {%- macro gen_drive_large_io_virtio(libvirt_largeio_logical_compat, libvirt_largeio_logical_compat_size, libvirt_largeio_pow_limit, @@ -49,7 +69,7 @@ the drives can vary by type, so we have one macro by type of drive. libvirt_extra_storage_aio_mode, libvirt_extra_storage_aio_cache_mode, kdevops_storage_pool_path) -%} - + {% set ns = namespace(lbs_idx=0) %} {% set max_pbs = libvirt_largeio_logical_compat_size * (2 ** libvirt_largeio_pow_limit) %} {% for n in range(0,libvirt_largeio_pow_limit+1) %} @@ -106,7 +126,7 @@ the drives can vary by type, so we have one macro by type of drive. 
libvirt_extra_storage_aio_mode, libvirt_extra_storage_aio_cache_mode, kdevops_storage_pool_path) -%} - + {% set ns = namespace(lbs_idx=0) %} {% set max_pbs = libvirt_largeio_logical_compat_size * (2 ** libvirt_largeio_pow_limit) %} {% for n in range(0,libvirt_largeio_pow_limit+1) %} diff --git a/playbooks/roles/gen_nodes/templates/gen_drives.j2 b/playbooks/roles/gen_nodes/templates/gen_drives.j2 index 874e5b062..2de13da4a 100644 --- a/playbooks/roles/gen_nodes/templates/gen_drives.j2 +++ b/playbooks/roles/gen_nodes/templates/gen_drives.j2 @@ -19,14 +19,21 @@ kdevops_storage_pool_path) }} {% else %} {{ drives.gen_drive_virtio(4, - kdevops_storage_pool_path, - hostname, - libvirt_extra_drive_format, - libvirt_extra_storage_aio_mode, - libvirt_extra_storage_aio_cache_mode, - libvirt_extra_storage_virtio_logical_block_size, - libvirt_extra_storage_virtio_physical_block_size) }} + kdevops_storage_pool_path, + hostname, + libvirt_extra_drive_format, + libvirt_extra_storage_aio_mode, + libvirt_extra_storage_aio_cache_mode, + libvirt_extra_storage_virtio_logical_block_size, + libvirt_extra_storage_virtio_physical_block_size) }} {% endif %} +{% elif libvirt_extra_storage_drive_scsi %} +{{ drives.gen_drive_scsi(4, + kdevops_storage_pool_path, + hostname, + libvirt_extra_drive_format, + libvirt_extra_storage_aio_mode, + libvirt_extra_storage_aio_cache_mode) }} {% elif libvirt_extra_storage_drive_nvme %} {% if libvirt_largeio_enable %} {{ drives.gen_drive_large_io_nvme(libvirt_largeio_logical_compat, diff --git a/playbooks/roles/gen_nodes/templates/kdevops_nodes_split_start.j2.yaml b/playbooks/roles/gen_nodes/templates/kdevops_nodes_split_start.j2.yaml index e5b88efef..91e105cd3 100644 --- a/playbooks/roles/gen_nodes/templates/kdevops_nodes_split_start.j2.yaml +++ b/playbooks/roles/gen_nodes/templates/kdevops_nodes_split_start.j2.yaml @@ -76,6 +76,16 @@ vagrant_global: physical_block_size: {{ libvirt_extra_storage_virtio_physical_block_size }} logical_block_size: {{ 
libvirt_extra_storage_virtio_logical_block_size }} {% endif %} +{% elif libvirt_extra_storage_drive_scsi %} + extra_disks: + data: + size: 102400 + scratch: + size: 102400 + extra1: + size: 102400 + extra2: + size: 102400 {% elif libvirt_extra_storage_drive_nvme %} extra_disks: data: