diff --git a/DC-SLES-virtualization-disk-cache b/DC-SLES-virtualization-disk-cache
new file mode 100644
index 000000000..42d944dd4
--- /dev/null
+++ b/DC-SLES-virtualization-disk-cache
@@ -0,0 +1,10 @@
+MAIN="virtualization-disk-cache.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/DC-SLES-virtualization-io b/DC-SLES-virtualization-io
new file mode 100644
index 000000000..222a9c0a5
--- /dev/null
+++ b/DC-SLES-virtualization-io
@@ -0,0 +1,10 @@
+MAIN="virtualization-io.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/DC-SLES-virtualization-libvirt b/DC-SLES-virtualization-libvirt
new file mode 100644
index 000000000..5814ebc27
--- /dev/null
+++ b/DC-SLES-virtualization-libvirt
@@ -0,0 +1,10 @@
+MAIN="virtualization-libvirt.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/DC-SLES-virtualization-qemu b/DC-SLES-virtualization-qemu
new file mode 100644
index 000000000..b469cccb8
--- /dev/null
+++ b/DC-SLES-virtualization-qemu
@@ -0,0 +1,10 @@
+MAIN="virtualization-qemu.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/DC-SLES-virtualization-spice-removal b/DC-SLES-virtualization-spice-removal
new file mode 100644
index 000000000..0bd7bb003
--- /dev/null
+++ b/DC-SLES-virtualization-spice-removal
@@ -0,0 +1,10 @@
+MAIN="virtualization-spice-removal.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/DC-SLES-virtualization-support b/DC-SLES-virtualization-support
new file mode 100644
index 000000000..62d9d19c2
--- /dev/null
+++ b/DC-SLES-virtualization-support
@@ -0,0 +1,10 @@
+MAIN="virtualization-support.asm.xml"
+SRC_DIR="articles"
+IMG_SRC_DIR="images"
+PROFCONDITION="suse-product"
+PROFOS="sles"
+#PROFCONDITION="suse-product;beta"
+#PROFCONDITION="community-project"
+
+STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
+FALLBACK_STYLEROOT="/usr/share/xml/docbook/stylesheet/suse2022-ns"
diff --git a/articles/virtual-disk-cache-mode-configure.asm.xml b/articles/virtualization-disk-cache.asm.xml
similarity index 97%
rename from articles/virtual-disk-cache-mode-configure.asm.xml
rename to articles/virtualization-disk-cache.asm.xml
index feb55fc44..bcd413add 100644
--- a/articles/virtual-disk-cache-mode-configure.asm.xml
+++ b/articles/virtualization-disk-cache.asm.xml
@@ -34,7 +34,7 @@
-
+
Smart Docs
@@ -50,7 +50,7 @@
https://bugzilla.suse.com/enter_bug.cgi
 Documentation
 SUSE Linux Enterprise Server 16.0
- tbazant@suse.com
+ souvik.sarkar@suse.com
 yes
diff --git a/articles/virtualization-io.asm.xml b/articles/virtualization-io.asm.xml
new file mode 100644
index 000000000..4a2253520
--- /dev/null
+++ b/articles/virtualization-io.asm.xml
@@ -0,0 +1,115 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Legal Notice
+
+
+ GNU Free Documentation License
+
+
+
+
+
+ Virtualization
+
+ 2025-11-27
+
+
+ Initial version
+
+
+
+
+
+
+
+
+
+ Products & Solutions
+
+
+
+
+ Virtualization
+
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ souvik.sarkar@suse.com
+
+ yes
+
+
+
+
+ &x86-64;
+ &power;
+
+
+
+
+ &productname;
+
+
+
+ Introduction to I/O virtualization
+ Learn about the basic concepts of I/O virtualization.
+
+
+ I/O virtualization enables virtual machines (VMs)
+ to efficiently interact with hardware devices (for example, network cards and storage) through the
+ hypervisor.
+
+
+
+
+ WHAT?
+
+
+ I/O virtualization is the mechanism enabling &vmguest; to interact with physical hardware
+ devices (for example, network interfaces and storage controllers) through a hypervisor, optimizing
+ resource usage while managing performance trade-offs between emulation, direct assignment
+ and hardware-assisted techniques.
+
+
+
+
+ WHY?
+
+
+ Full emulation introduces I/O latency overhead and consumes significant CPU resources, making
+ hardware-assisted techniques (VFIO/SR-IOV) essential for latency-sensitive workloads such
+ as high-performance networking and storage.
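+
+ As a quick check on a host with an SR-IOV-capable network card (a
+ sketch; the card model and output are illustrative), you can list the
+ virtual functions it exposes:
+
+&prompt.user;lspci -nn | grep -i "Virtual Function"
+01:10.0 Ethernet controller [0200]: Intel Corporation 82576 Virtual Function [8086:10ca] (rev 01)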
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/articles/virtualization-libvirt.asm.xml b/articles/virtualization-libvirt.asm.xml
new file mode 100644
index 000000000..6d281bdd1
--- /dev/null
+++ b/articles/virtualization-libvirt.asm.xml
@@ -0,0 +1,130 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Legal Notice
+
+
+ GNU Free Documentation License
+
+
+
+
+
+ Virtualization
+
+ 2025-11-27
+
+
+ Initial version
+
+
+
+
+
+
+
+
+
+ Products & Solutions
+
+
+
+
+ Virtualization
+
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ souvik.sarkar@suse.com
+
+ yes
+
+
+
+
+ &x86-64;
+ &power;
+
+
+
+
+ &productname;
+
+
+
+ Introduction to &libvirt;
+ Manage virtualization platforms (KVM, QEMU) with a unified API.
+
+
+ &libvirt; provides a consistent management interface for virtualization platforms, eliminating vendor-specific tool complexity.
+
+
+
+
+ WHAT?
+
+
+ &libvirt; is a software toolkit that provides a consistent API for managing virtual machines across multiple hypervisor platforms (KVM, QEMU).
+
+
+
+
+ WHY?
+
+
+ &libvirt; eliminates the operational complexity of managing vendor-specific tools.
+
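+
+ As a minimal illustration (a sketch, assuming the &libvirt; daemon is
+ running; the guest name sles is hypothetical), a single
+ virsh command lists all guests regardless of the
+ underlying hypervisor:
+
+&prompt.user;virsh list --all
+ Id   Name   State
+----------------------
+ 1    sles   running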
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/articles/virtualization-qemu.asm.xml b/articles/virtualization-qemu.asm.xml
new file mode 100644
index 000000000..3afd47e0f
--- /dev/null
+++ b/articles/virtualization-qemu.asm.xml
@@ -0,0 +1,131 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+
+
+
+
+
+
+ Legal Notice
+
+
+ GNU Free Documentation License
+
+
+
+
+
+ Virtualization
+
+ 2025-11-27
+
+
+ Initial version
+
+
+
+
+
+
+
+
+
+ Products & Solutions
+
+
+
+
+ Virtualization
+
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ souvik.sarkar@suse.com
+
+ yes
+
+
+
+
+ &x86-64;
+ &power;
+
+
+
+
+ &productname;
+
+
+
+ Introduction to virtualization
+ Learn about the basic concepts of virtualization.
+
+
+ Using virtualization, you can run multiple virtual machines on a single bare-metal host to save resources.
+
+
+
+
+ WHAT?
+
+
+ Using virtualization, you can run multiple virtual machines on a single
+ bare-metal host.
+
+
+
+
+ WHY?
+
+
+ Sharing host hardware between multiple virtualized guests significantly saves
+ resources.
+
+
+
+
+ EFFORT
+
+
+ It takes less than 15 minutes of your time to understand the concept of
+ virtualization.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/articles/virtualization-spice-removal.asm.xml b/articles/virtualization-spice-removal.asm.xml
new file mode 100644
index 000000000..cf43119c6
--- /dev/null
+++ b/articles/virtualization-spice-removal.asm.xml
@@ -0,0 +1,108 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Legal Notice
+
+
+ GNU Free Documentation License
+
+
+
+
+
+ Virtualization
+
+ 2025-11-27
+
+
+ Initial version
+
+
+
+
+
+
+
+
+
+ Products & Solutions
+
+
+
+
+ Virtualization
+
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ souvik.sarkar@suse.com
+
+ yes
+
+
+
+
+ &x86-64;
+ &power;
+
+
+
+
+ &productname;
+
+
+
+ Spice removal in the virtualization tool stack
+ Migration guidance from Spice to alternative virtualization protocols.
+
+
+ Migration path from Spice to alternative virtualization protocols in SUSE products.
+
+
+
+
+ WHAT?
+
+
+ &sle; provides comprehensive virtualization support through &kvm;, with alternative protocols replacing Spice, enabling efficient resource utilization and scalable cloud deployments without a Spice dependency. This includes migration paths and alternative solutions for Spice-based virtualization.
+
+
+
+
+ WHY?
+
+
+ Migrating from Spice to alternative virtualization protocols enables improved security, better performance, enhanced compatibility, and reduced maintenance overhead for enterprise cloud and containerized applications. The transition addresses concerns about deprecated technology and provides modern alternatives for virtual desktop and console access.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/articles/virtualization-support.asm.xml b/articles/virtualization-support.asm.xml
new file mode 100644
index 000000000..6963a1d77
--- /dev/null
+++ b/articles/virtualization-support.asm.xml
@@ -0,0 +1,108 @@
+
+
+
+ %entities;
+]>
+
+
+
+
+
+
+ Legal Notice
+
+
+ GNU Free Documentation License
+
+
+
+
+
+ Virtualization
+
+ 2025-11-27
+
+
+ Initial version
+
+
+
+
+
+
+
+
+
+ Products & Solutions
+
+
+
+
+ Virtualization
+
+
+
+
+
+ https://bugzilla.suse.com/enter_bug.cgi
+ Documentation
+ SUSE Linux Enterprise Server 16.0
+ souvik.sarkar@suse.com
+
+ yes
+
+
+
+
+ &x86-64;
+ &power;
+
+
+
+
+ &productname;
+
+
+
+ Virtualization support
+ Support for virtualization
+
+
+ Support for the virtualization tool stack in SUSE products.
+
+
+
+
+ WHAT?
+
+
+ &suselinux; provides comprehensive virtualization support through &kvm;, and is also supported as a guest on third-party hypervisors, enabling efficient resource utilization and scalable cloud deployments.
+
+
+
+
+ WHY?
+
+
+ Virtualization enables hardware consolidation, improved resource utilization, reduced operational costs, and enhanced flexibility for enterprise cloud and containerized applications.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/articles/virtualization.asm.xml b/articles/virtualization.asm.xml
index 4b2e43c11..0a0cdffd4 100644
--- a/articles/virtualization.asm.xml
+++ b/articles/virtualization.asm.xml
@@ -15,21 +15,10 @@
-
-
-
-
-
-
-
-
-
-
-
-
+
+ Legal Notice
@@ -43,7 +32,7 @@
Virtualization
- 2025-11-04
+ 2025-11-27
Initial version
@@ -58,7 +47,7 @@
Products & Solutions
-
+
Virtualization
@@ -91,7 +80,7 @@
Learn about the basic concepts of virtualization.
- By means of virtualization, you can run multiple virtual machines on a single bare-metal host to save resources
+ Using virtualization, you can run multiple virtual machines on a single bare-metal host to save resources.
@@ -99,7 +88,7 @@
WHAT?
- By means of virtualization, you can run multiple virtual machines on a single
+ Using virtualization, you can run multiple virtual machines on a single
bare-metal host.
@@ -128,17 +117,6 @@
-
-
-
-
-
-
-
-
-
-
-
diff --git a/images/default-vm-options.png b/images/default-vm-options.png
index 29edf5a1d..8fb4a486a 100644
Binary files a/images/default-vm-options.png and b/images/default-vm-options.png differ
diff --git a/images/qemu_sles_vnc.png b/images/qemu_sles_vnc.png
new file mode 100644
index 000000000..d5ce2f93c
Binary files /dev/null and b/images/qemu_sles_vnc.png differ
diff --git a/images/qemu_vnc_pwd.png b/images/qemu_vnc_pwd.png
new file mode 100644
index 000000000..9eb7519e6
Binary files /dev/null and b/images/qemu_vnc_pwd.png differ
diff --git a/images/qemu_win_sles.png b/images/qemu_win_sles.png
new file mode 100644
index 000000000..2226db2c1
Binary files /dev/null and b/images/qemu_win_sles.png differ
diff --git a/references/libvirt_configuration_virsh.xml b/references/libvirt_configuration_virsh.xml
index b8e02ab4b..60f829906 100644
--- a/references/libvirt_configuration_virsh.xml
+++ b/references/libvirt_configuration_virsh.xml
@@ -175,7 +175,7 @@ Product Name: Standard PC (Q35 + ICH9, 2009)
libvirt automatically enables a default set of
hypervisor features that are sufficient in most circumstances, but also
- allows enabling and disabling features as needed.
+ allows enabling and disabling features as needed.
Hypervisor features can be
configured with &virsh;. Look for the <features> element
in the &vmguest;'s configuration file and adjust its features as
@@ -909,9 +909,7 @@ Bus 001 Device 003: ID 0557:2221 ATEN Internati
An AMD64/Intel 64 host supporting hardware virtualization (AMD-V or
- Intel VT-x), see
- for more
- information
+ Intel VT-x). For more information, see the section Architecture Support in the article Virtualization Limits and Support.
@@ -1564,7 +1562,7 @@ Found 1 nodes in stdin:
&vmguest; contains PCI and SCSI devices, PCI and SCSI controllers are
created and managed automatically. libvirt also models
controllers that are hypervisor-specific, for example, a
- virtio-serial controller for KVM. Although the
+ virtio-serial controller for KVM. Although the
default controllers and their configuration are generally fine, there may
be use cases where controllers or their attributes need to be adjusted
manually.
diff --git a/references/libvirt_guest_installation.xml b/references/libvirt_guest_installation.xml
index 77799faf1..9086091d8 100644
--- a/references/libvirt_guest_installation.xml
+++ b/references/libvirt_guest_installation.xml
@@ -16,8 +16,8 @@
A &vmguest; consists of an image containing an operating system and data files and a
configuration file describing the &vmguest;'s virtual hardware resources. &vmguest;s are hosted
on and controlled by the &vmhost;. This section provides generalized instructions for
- installing a &vmguest;. For a list of supported &vmguest;s refer to
- .
+ installing a &vmguest;. For a list of supported &vmguest;s, refer to the
+ section Supported guest operating systems in the article Virtualization Limits and Support.
Virtual machines have few if any requirements above those required to run the operating system.
@@ -86,7 +86,7 @@
Specify the path on the &vmhost; to an ISO image containing the installation data.
If it is available as a volume in a libvirt storage pool, you can also select it
- using Browse.
+ using Browse.
@@ -427,7 +427,7 @@
It is possible to directly specify the Kernel and Initrd of the installer, for example,
- from a network source.
+ from a network source.
To pass additional boot parameters, use the option. This
@@ -470,7 +470,7 @@ network=vnet_nated
- &suse; supports &uefisecboot; on &x86-64; &kvm; guests only.
+ &suse; supports &uefisecboot; on &x86-64; &kvm; guests only.
@@ -698,7 +698,7 @@ network=vnet_nated
If you are installing from the command line, you need to set up the virtual CD/DVD drives
with the parameter rather than with . The
- device that is specified first is used for booting.
+ device that is specified first is used for booting.
diff --git a/references/qemu_guest_installation.xml b/references/qemu_guest_installation.xml
new file mode 100644
index 000000000..e0cfff9e0
--- /dev/null
+++ b/references/qemu_guest_installation.xml
@@ -0,0 +1,1061 @@
+
+
+ %entities;
+]>
+
+
+ Guest installation
+
+
+
+ yes
+
+
+
+ 2025-12-02
+
+
+
+
+
+
+
+ The libvirt-based tools such as
+ virt-manager and virt-install offer
+ convenient interfaces to set up and manage virtual machines. They act as a
+ kind of wrapper for the qemu-system-ARCH command.
+ However, it is also possible to use qemu-system-ARCH
+ directly, without the libvirt-based tools.
+
+
+ qemu-system-ARCH and libvirt
+
+ &vmguest;s created with
+ qemu-system-ARCH are not visible to the
+ libvirt-based tools.
+
+
+
+ Basic installation with qemu-system-ARCH
+
+
+ In the following example, a virtual machine for a &sls; installation
+ is created. For detailed information on the commands, refer to the
+ respective man pages.
+
+
+
+ If you do not already have an image of a system that you want to run in a
+ virtualized environment, you need to create one from the installation
+ media. In such case, you need to prepare a hard disk image, and obtain an
+ image of the installation media or the media itself.
+
+
+
+ Create a hard disk with qemu-img.
+
+
+&prompt.user;qemu-img create -f raw /images/sles/hda 8G
+
+
+
+
+ The subcommand tells
+ qemu-img to create a new image.
+
+
+
+
+ Specify the disk's format with the parameter.
+
+
+
+
+ The full path to the image file.
+
+
+
+
+ The size of the image, 8 GB in this case. The image is created
+ as a sparse file that grows when
+ the disk is filled with data. The specified size defines the maximum
+ size to which the image file can grow.
+
+
+
+
+
+ After at least one hard disk image is created, you can set up a virtual
+ machine with qemu-system-ARCH that boots into the
+ installation system:
+
+
+&prompt.root;qemu-system-x86_64 -name "sles" -machine accel=kvm -M pc -m 768 \
+-smp 2 -boot d \
+-drive file=/images/sles/hda,if=virtio,index=0,media=disk,format=raw \
+-drive file=/isos/&installmedia;,index=1,media=cdrom \
+-net nic,model=virtio,macaddr=52:54:00:05:11:11 -net user \
+-vga cirrus -balloon virtio
+
+
+
+
+ Name of the virtual machine that is displayed in the window caption
+ and is also used for the VNC server. This name must be unique.
+
+
+
+
+ Specifies the machine type. Use qemu-system-ARCH -M help
+ to display a list of valid machine types.
+ pc is the default Standard PC.
+
+
+
+
+ Maximum amount of memory for the virtual machine.
+
+
+
+
+ Defines an SMP system with two processors.
+
+
+
+
+ Specifies the boot order. Valid values are a,
+ b (floppy 1 and 2), c (first
+ hard disk), d (first CD-ROM), or n to
+ p (Ether-boot from network adapter 1-3). Defaults
+ to c.
+
+
+
+
+ Defines the first (index=0) hard disk. It is
+ accessed as a paravirtualized (if=virtio) drive in
+ raw format.
+
+
+
+
+ The second (index=1) image drive acts as a CD-ROM.
+
+
+
+
+ Defines a paravirtualized (model=virtio) network
+ adapter with the MAC address 52:54:00:05:11:11. Be
+ sure to specify a unique MAC address, otherwise a network conflict
+ may occur.
+
+
+
+
+ Specifies the graphic card. If you specify none,
+ the graphic card is disabled.
+
+
+
+
+ Defines the paravirtualized balloon device that allows you to dynamically
+ change the amount of memory (up to the maximum value specified with
+ the -m parameter).
+
+
+
+
+
+ After the installation of the guest operating system finishes, you can
+ start the related virtual machine without the need to specify the CD-ROM
+ device:
+
+
+&prompt.root;qemu-system-x86_64 -name "sles" -machine type=pc,accel=kvm -m 768 \
+-smp 2 -boot c \
+-drive file=/images/sles/hda,if=virtio,index=0,media=disk,format=raw \
+-net nic,model=virtio,macaddr=52:54:00:05:11:11 \
+-vga cirrus -balloon virtio
+
+
+
+ Managing disk images with qemu-img
+
+
+ In the previous section (see
+ ), we used the
+ qemu-img command to create an image of a hard disk.
+ You can, however, use qemu-img for general disk image
+ manipulation. This section introduces qemu-img
+ subcommands to help manage the disk images flexibly.
+
+
+
+ General information on qemu-img invocation
+
+ qemu-img uses subcommands (like
+ zypper does) to do specific tasks. Each subcommand
+ understands a different set of options. Certain options are general and
+ used by several of these subcommands, while others are unique to the
+ related subcommand. See the qemu-img man page (man 1
+ qemu-img) for a list of all supported options.
+ qemu-img uses the following general syntax:
+
+&prompt.user;qemu-img subcommand [options]
+
+ and supports the following subcommands:
+
+
+
+ create
+
+
+ Creates a new disk image on the file system.
+
+
+
+
+ check
+
+
+ Checks an existing disk image for errors.
+
+
+
+
+ compare
+
+
+ Checks whether two images have the same content.
+
+
+
+
+ map
+
+
+ Dumps the metadata of the image file name and its backing file
+ chain.
+
+
+
+
+ amend
+
+
+ Amends the image format specific options for the image file name.
+
+
+
+
+ convert
+
+
+ Converts an existing disk image to a new one in a different
+ format.
+
+
+
+
+ info
+
+
+ Displays information about the relevant disk image.
+
+
+
+
+ snapshot
+
+
+ Manages snapshots of existing disk images.
+
+
+
+
+ commit
+
+
+ Applies changes made to an existing disk image.
+
+
+
+
+ rebase
+
+
+ Changes the backing file of an existing image.
+
+
+
+
+ resize
+
+
+ Increases or decreases the size of an existing image.
+
+
+
+
+
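+ For example, the compare subcommand verifies that two
+ images hold the same guest-visible data (a quick sketch;
+ /images/sles_copy.raw is a hypothetical copy):
+
+&prompt.user;qemu-img compare /images/sles.raw /images/sles_copy.raw
+Images are identical.
+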
+
+
+ Creating, converting, and checking disk images
+
+ This section describes how to create disk images, check their
+ condition, convert a disk image from one format to another, and get
+ detailed information about a particular disk image.
+
+
+ qemu-img create
+
+ Use qemu-img create to create a new disk image for
+ your &vmguest; operating system. The command uses the following
+ syntax:
+
+&prompt.user;qemu-img create -f fmt -o options fname size
+
+
+
+ The format of the target image. Supported formats are
+ raw and qcow2.
+
+
+
+
+ Certain image formats support additional options to be passed on the
+ command line. You can specify them here with the
+ -o option. The raw image
+ format supports only the size option, so it is
+ possible to insert -o size=8G instead of
+ adding the size option at the end of the command.
+
+
+
+
+ Path to the target disk image to be created.
+
+
+
+
+ Size of the target disk image (if not already specified with the
+ -o size=<image_size> option). Optional
+ suffixes for the image size are K (kilobyte),
+ M (megabyte), G (gigabyte),
+ or T (terabyte).
+
+
+
+
+ To create a new disk image sles.raw in the
+ directory /images growing up to a maximum size
+ of 4 GB, run the following command:
+
+&prompt.user;qemu-img create -f raw -o size=4G /images/sles.raw
+Formatting '/images/sles.raw', fmt=raw size=4294967296
+
+&prompt.user;ls -l /images/sles.raw
+-rw-r--r-- 1 tux users 4294967296 Nov 15 15:56 /images/sles.raw
+
+&prompt.user;qemu-img info /images/sles.raw
+image: /images/sles.raw
+file format: raw
+virtual size: 4.0G (4294967296 bytes)
+disk size: 0
+
+
+ As you can see, the virtual size of the newly
+ created image is 4 GB, but the actual reported disk size is 0 as no
+ data has been written to the image yet.
+
+
+ &vmguest; images on the Btrfs file system
+
+ If you need to create a disk image on the Btrfs file system, you
+ can use to reduce the performance
+ overhead created by the copy-on-write feature of Btrfs:
+
+&prompt.user;qemu-img create -o nocow=on test.img 8G
+
+ If you, however, want to use copy-on-write, for example, for
+ creating snapshots or sharing them across virtual machines, then
+ leave the command line without the option.
+
+
+
+
+ qemu-img convert
+
+ Use qemu-img convert to convert disk images to
+ another format. To get a complete list of image formats supported by
+ &qemu;, run qemu-img -h and look
+ at the last line of the output. The command uses the following
+ syntax:
+
+&prompt.user;qemu-img convert -c -f fmt -O out_fmt -o options fname out_fname
+
+
+
+ Applies compression on the target disk image. Only
+ qcow and qcow2 formats
+ support compression.
+
+
+
+
+ The format of the source disk image. It is normally autodetected
+ and can therefore be omitted.
+
+
+
+
+ The format of the target disk image.
+
+
+
+
+ Specify additional options relevant for the target image format.
+ Use -o ? to view the list of options supported
+ by the target image format.
+
+
+
+
+ Path to the source disk image to be converted.
+
+
+
+
+ Path to the converted target disk image.
+
+
+
+&prompt.user;qemu-img convert -O vmdk /images/sles.raw \
+/images/sles.vmdk
+
+&prompt.user;ls -l /images/
+-rw-r--r-- 1 tux users 4294967296 16. lis 10.50 sles.raw
+-rw-r--r-- 1 tux users 2574450688 16. lis 14.18 sles.vmdk
+
+
+ To see a list of options relevant for the selected target image
+ format, run the following command (replace vmdk
+ with your image format):
+
+&prompt.user;qemu-img convert -O vmdk /images/sles.raw \
+/images/sles.vmdk -o ?
+Supported options:
+size Virtual disk size
+backing_file File name of a base image
+compat6 VMDK version 6 image
+subformat VMDK flat extent format, can be one of {monolithicSparse \
+ (default) | monolithicFlat | twoGbMaxExtentSparse | twoGbMaxExtentFlat}
+scsi SCSI image
+
+
+
+ qemu-img check
+
+ Use qemu-img check to check the existing disk
+ image for errors. Not all disk image formats support this feature.
+ The command uses the following syntax:
+
+&prompt.user;qemu-img check -f fmt fname
+
+
+
+ The format of the source disk image. It is normally autodetected
+ and can therefore be omitted.
+
+
+
+
+ Path to the source disk image to be checked.
+
+
+
+
+ If no error is found, the command returns no output. Otherwise, the
+ type and number of errors found is shown.
+
+&prompt.user;qemu-img check -f qcow2 /images/sles.qcow2
+ERROR: invalid cluster offset=0x2af0000
+[...]
+ERROR: invalid cluster offset=0x34ab0000
+378 errors were found on the image.
+
+
+
+ Increasing the size of an existing disk image
+
+ When creating a new image, you must specify its maximum size before
+ the image is created (see
+ ). After
+ you have installed the &vmguest; and have been using it for some
+ time, the initial size of the image may no longer be sufficient. In
+ that case, add more space to it.
+
+
+ To increase the size of an existing disk image by 2 gigabytes, use:
+
+&prompt.user;qemu-img resize /images/sles.raw +2GB
+
+
+ You can resize the disk image using the formats
+ raw and qcow2. To resize an
+ image in another format, convert it to a supported format with
+ qemu-img convert first.
+
+
+
+ The image now contains an empty space of 2 GB after the final
+ partition. You can resize the existing partitions or add new ones.
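+
+ To verify the result, inspect the image again. The output (a sketch
+ continuing the example above) shows the grown virtual size, while the
+ actual disk size on the host is unchanged:
+
+&prompt.user;qemu-img info /images/sles.raw
+image: /images/sles.raw
+file format: raw
+virtual size: 6.0G (6442450944 bytes)
+disk size: 2.4G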
+
+
+
+ Advanced options for the qcow2 file format
+
+ qcow2 is the main disk image format used by
+ &qemu;. Its size grows on demand, and the disk space is only
+ allocated when it is needed by the virtual machine.
+
+
+ A qcow2 formatted file is organized in units of constant size. These
+ units are called clusters. Viewed from the guest
+ side, the virtual disk is also divided into clusters of the same
+ size. &qemu; defaults to 64 kB clusters, but you can specify a
+ different value when creating a new image:
+
+&prompt.user;qemu-img create -f qcow2 -o cluster_size=128K virt_disk.qcow2 4G
+
+ A qcow2 image contains a set of tables organized in two levels that
+ are called the L1 and L2 tables. There is just one L1 table per disk
+ image, while there can be many L2 tables depending on how big the
+ image is.
+
+
+ To read or write data to the virtual disk, &qemu; needs to read its
+ corresponding L2 table to find out the relevant data location.
+ Because reading the table for each I/O operation consumes system
+ resources, &qemu; keeps a cache of L2 tables in memory to speed up
+ disk access.
+
+
+ Choosing the right cache size
+
+ The cache size relates to the amount of allocated space. L2 cache
+ can map the following amount of virtual disk:
+
+disk_size = l2_cache_size * cluster_size / 8
+
+ With the default 64 kB of cluster size, that is
+
+disk_size = l2_cache_size * 8192
+
+ Therefore, to have a cache that maps n gigabytes
+ of disk space with the default cluster size, you need
+
+l2_cache_size = disk_size_GB * 131072
+
+ &qemu; uses 1 MB (1048576 bytes) of L2 cache by default.
+ Following the above formulas, 1 MB of L2 cache covers
+ 8 GB (1048576 / 131072) of virtual disk. This means that the
+ performance is fine with the default L2 cache size if your virtual
+ disk size is up to 8 GB. For larger disks, you can speed up
+ the disk access by increasing the L2 cache size.
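+
+ For example, to fully cover a 50 GB virtual disk with the default
+ cluster size, a worked calculation following the formula above gives
+
+l2_cache_size = 50 * 131072 = 6553600
+
+ that is, 6553600 bytes (6.25 MB), which you can pass via the
+ l2-cache-size option described below.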
+
+
+
+ Configuring the cache size
+
+ You can use the -drive option on the &qemu;
+ command line to specify the cache sizes. Alternatively when
+ communicating via QMP, use the blockdev-add
+ command. For more information on QMP, see
+ .
+
+
+ The following options configure the cache size for the virtual
+ guest:
+
+
+
+ l2-cache-size
+
+
+ The maximum size of the L2 table cache.
+
+
+
+
+ refcount-cache-size
+
+
+ The maximum size of the refcount block
+ cache. For more information on refcount,
+ see
+ .
+
+
+
+
+ cache-size
+
+
+ The maximum size of both caches combined.
+
+
+
+
+
+ When specifying values for the options above, be aware of the
+ following:
+
+
+
+
+ The size of both the L2 and refcount block caches needs to be a
+ multiple of the cluster size.
+
+
+
+
+ If you only set one of the options, &qemu; automatically
+ adjusts the other options so that the L2 cache is 4 times
+ bigger than the refcount cache.
+
+
+
+
+ The refcount cache is used much less often than the L2 cache,
+ therefore you can keep it small:
+
+&prompt.root;qemu-system-ARCH [...] \
+ -drive file=disk_image.qcow2,l2-cache-size=4194304,refcount-cache-size=262144
+
+
+ Reducing the memory usage
+
+ The larger the cache, the more memory it consumes. There is a
+ separate L2 cache for each qcow2 file. When using a lot of big disk
+ images, you may need a considerably large amount of memory. Memory
+ consumption is even worse if you add backing files
+ () and
+ snapshots (see
+ ) to the
+ guest's setup chain.
+
+
+ This is why &qemu; introduced the cache-clean-interval
+ setting. It defines an
+ interval in seconds after which all cache entries that have not
+ been accessed are removed from memory.
+
+
+ The following example removes all unused cache entries every 10
+ minutes:
+
+&prompt.root;qemu-system-ARCH [...] -drive file=hd.qcow2,cache-clean-interval=600
+
+ If this option is not set, it defaults to 0, which disables
+ this feature.
+
+
+
+
+
+
+ Managing snapshots of virtual machines with qemu-img
+
+ Virtual machine snapshots are snapshots of the complete
+ environment in which a &vmguest; is running. The snapshot includes the
+ state of the processor (CPU), memory (RAM), devices, and all writable
+ disks.
+
+
+ Snapshots are helpful when you need to save your virtual machine in a
+ particular state. For example, after you configured network services on
+ a virtualized server and want to quickly start the virtual machine in
+ the same state you last saved it. Or you can create a snapshot after
+ the virtual machine has been powered off to create a backup state
+ before you try something experimental and make &vmguest; unstable. This
+ section introduces the latter case, while the former is described in
+ .
+
+
+ To use snapshots, your &vmguest; must contain at least one writable
+ hard disk image in qcow2 format. This device is
+ normally the first virtual hard disk.
+
+
+ Virtual machine snapshots are created with the
+ savevm command in the interactive &qemu; monitor. To
+ make identifying a particular snapshot easier, you can assign it a
+ tag. For more information on &qemu; monitor, see
+ .
+
+
+ Once your qcow2 disk image contains saved snapshots,
+ you can inspect them with the qemu-img snapshot
+ command.
+
+
+ Shut down the &vmguest;
+
+ Do not create or delete virtual machine snapshots with the
+ qemu-img snapshot command while the virtual
+ machine is running. Otherwise, you may damage the disk image holding the
+ saved state of the virtual machine.
+
+
+
+ Listing existing snapshots
+
+ Use qemu-img snapshot -l
+ DISK_IMAGE to view a list of all existing
+ snapshots saved in the DISK_IMAGE image. You can
+ get the list even while the &vmguest; is running.
+
+&prompt.user;qemu-img snapshot -l /images/sles.qcow2
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 booting 4.4M 2013-11-22 10:51:10 00:00:20.476
+2 booted 184M 2013-11-22 10:53:03 00:02:05.394
+3 logged_in 273M 2013-11-22 11:00:25 00:04:34.843
+4 ff_and_term_running 372M 2013-11-22 11:12:27 00:08:44.965
+
+
+
+ Unique auto-incremented identification number of the snapshot.
+
+
+
+
+ Unique description string of the snapshot. It is meant as a
+ human-readable version of the ID.
+
+
+
+
+ The disk space occupied by the snapshot. The more
+ memory is consumed by running applications, the bigger the
+ snapshot is.
+
+
+
+
+ Time and date the snapshot was created.
+
+
+
+
+ The current state of the virtual machine's clock.
+
+
+
+
+
+ Creating snapshots of a powered-off virtual machine
+
+ Use qemu-img snapshot -c
+ SNAPSHOT_TITLE
+ DISK_IMAGE to create a snapshot of the
+ current state of a virtual machine that was previously powered off.
+
+&prompt.user;qemu-img snapshot -c backup_snapshot /images/sles.qcow2
+&prompt.user;qemu-img snapshot -l /images/sles.qcow2
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 booting 4.4M 2013-11-22 10:51:10 00:00:20.476
+2 booted 184M 2013-11-22 10:53:03 00:02:05.394
+3 logged_in 273M 2013-11-22 11:00:25 00:04:34.843
+4 ff_and_term_running 372M 2013-11-22 11:12:27 00:08:44.965
+5 backup_snapshot 0 2013-11-22 14:14:00 00:00:00.000
+
+ If something breaks in your &vmguest; and you need to restore the
+ state of the saved snapshot (ID 5 in our example), power off your
+ &vmguest; and execute the following command:
+
+&prompt.user;qemu-img snapshot -a 5 /images/sles.qcow2
+
+ The next time you run the virtual machine with
+ qemu-system-ARCH, it will be in the state of
+ snapshot number 5.
+
+
+
+ The qemu-img snapshot -c command is not related
+ to the savevm command of &qemu; monitor (see
+ ). For example, you cannot apply
+ a snapshot with qemu-img snapshot -a on a
+ snapshot created with savevm in &qemu;'s
+ monitor.
+
+
+
+
+ Deleting snapshots
+
+ Use qemu-img snapshot -d
+ SNAPSHOT_ID
+ DISK_IMAGE to delete old or unneeded
+ snapshots of a virtual machine. This saves disk space inside the
+ qcow2 disk image as the space occupied by the
+ snapshot data is freed:
+
+&prompt.user;qemu-img snapshot -d 2 /images/sles.qcow2
+
+
+
+
+ Manipulate disk images effectively
+
+ Imagine the following real-life situation: you are a server
+ administrator who runs and manages several virtualized operating
+ systems. One group of these systems is based on one specific
+ distribution, while another group (or groups) is based on different
+ versions of the distribution or even on a different (and maybe
+ non-Unix) platform. To make the case even more complex, individual
+ virtual guest systems based on the same distribution differ according
+ to the department and deployment. A file server typically uses a
+ different setup and services than a Web server does, while both may
+ still be based on
+ &slsreg; or &opensuse;.
+
+
+ With &qemu; it is possible to create base disk images.
+ You can use them as template virtual machines. These base images save
+ you plenty of time because you do not need to install the same
+ operating system more than once.
+
+
+ Base and derived images
+
+ First, build a disk image as usual and install the target system on
+ it. For more information, see
+ and
+ . Then build a
+ new image while using the first one as a base image. The base image
+ is also called a backing file. After your new
+ derived image is built, never boot the base
+ image again, but boot the derived image instead. Several derived
+ images may depend on one base image at the same time. Therefore,
+ changing the base image can damage the dependencies. While using your
+ derived image, &qemu; writes changes to it and uses the base image
+ only for reading.
+
+
+ It is a good practice to create a base image from a freshly installed
+ (and, if needed, registered) operating system with no patches applied
+ and no additional applications installed or removed. Later on, you
+ can create another base image with the latest patches applied and
+ based on the original base image.
+
+
+
+ Creating derived images
+
+
+ While you can use the raw format for base
+ images, you cannot use it for derived images because the
+ raw format does not support the
+ backing_file option. Use, for example, the
+ qcow2 format for the derived images.
+
+
+
+ For example, /images/sles_base.raw is the base
+ image holding a freshly installed system.
+
+&prompt.user;qemu-img info /images/sles_base.raw
+image: /images/sles_base.raw
+file format: raw
+virtual size: 4.0G (4294967296 bytes)
+disk size: 2.4G
+
+ The image's reserved size is 4 GB, the actual size is 2.4 GB, and its
+ format is raw. Create an image derived from the
+ /images/sles_base.raw base image with:
+
+&prompt.user;qemu-img create -f qcow2 /images/sles_derived.qcow2 \
+-o backing_file=/images/sles_base.raw
+Formatting '/images/sles_derived.qcow2', fmt=qcow2 size=4294967296 \
+backing_file='/images/sles_base.raw' encryption=off cluster_size=0
+
+
+ Look at the derived image details:
+
+&prompt.user;qemu-img info /images/sles_derived.qcow2
+image: /images/sles_derived.qcow2
+file format: qcow2
+virtual size: 4.0G (4294967296 bytes)
+disk size: 140K
+cluster_size: 65536
+backing file: /images/sles_base.raw \
+(actual path: /images/sles_base.raw)
+
+ Although the reserved size of the derived image is the same as the
+ size of the base image (4 GB), the actual size is 140 KB only. The
+ reason is that only changes made to the system inside the derived
+ image are saved. Run the derived virtual machine, register it, if
+ needed, and apply the latest patches. Do any other changes in the
+ system such as removing unneeded or installing new software packages.
+ Then shut the &vmguest; down and examine its details once more:
+
+&prompt.user;qemu-img info /images/sles_derived.qcow2
+image: /images/sles_derived.qcow2
+file format: qcow2
+virtual size: 4.0G (4294967296 bytes)
+disk size: 1.1G
+cluster_size: 65536
+backing file: /images/sles_base.raw \
+(actual path: /images/sles_base.raw)
+
+ The disk size value has grown to 1.1 GB, which is
+ the disk space occupied by the changes on the file system compared to
+ the base image.
+
+
+
+ Rebasing derived images
+
+ After you have modified the derived image (applied patches, installed
+ specific applications, changed environment settings, etc.), it
+ reaches the desired state. At that point, you can merge the original
+ base image and the derived image to create a new base image.
+
+
+ Your original base image (/images/sles_base.raw)
+ holds a freshly installed system. It can be a template for new
+ modified base images, while the new one can contain the same system
+ as the first one plus all security and update patches applied, for
+ example. After you have created this new base image, you can use it
+ as a template for more specialized derived images as well. The new
+ base image becomes independent of the original one. The process of
+ creating base images from derived ones is called
+ rebasing:
+
+&prompt.user;qemu-img convert /images/sles_derived.qcow2 \
+-O raw /images/sles_base2.raw
+
+ This command created the new base image
+ /images/sles_base2.raw using the
+ raw format.
+
+&prompt.user;qemu-img info /images/sles_base2.raw
+image: /images/sles_base2.raw
+file format: raw
+virtual size: 4.0G (4294967296 bytes)
+disk size: 2.8G
+
+ The new image is 0.4 gigabytes bigger than the original base image.
+ It uses no backing file, and you can easily create new derived images
+ based upon it. This lets you create a sophisticated hierarchy of
+ virtual disk images for your organization, saving a lot of time and
+ work.
+
+
+
+ Mounting an image on a &vmhost;
+
+ It can be useful to mount a virtual disk image under the host system.
+
+
+
+ Linux systems can mount an internal partition of a
+ raw disk image using a loopback device. The first
+ example procedure is more complex but more illustrative, while the
+ second one is straightforward:
+
+
+ Mounting disk image by calculating partition offset
+
+
+ Set a loop device on the disk image whose
+ partition you want to mount.
+
+&prompt.user;losetup /dev/loop0 /images/sles_base.raw
+
+
+
+ Find the sector size and the starting
+ sector number of the partition you want to
+ mount.
+
+&prompt.user;fdisk -lu /dev/loop0
+
+Disk /dev/loop0: 4294 MB, 4294967296 bytes
+255 heads, 63 sectors/track, 522 cylinders, total 8388608 sectors
+Units = sectors of 1 * 512 = 512 bytes
+Disk identifier: 0x000ceca8
+
+ Device Boot Start End Blocks Id System
+/dev/loop0p1 63 1542239 771088+ 82 Linux swap
+/dev/loop0p2 * 1542240 8385929 3421845 83 Linux
+
+
+
+ The disk sector size.
+
+
+
+
+ The starting sector of the partition.
+
+
+
+
+
+
+ Calculate the partition start offset:
+
+
+ sector_size * sector_start = 512 * 1542240 = 789626880
+
+
+
+
+
+ Delete the loop device and mount the partition inside the disk image
+ with the calculated offset on a prepared directory.
+
+&prompt.user;losetup -d /dev/loop0
+&prompt.user;mount -o loop,offset=789626880 \
+/images/sles_base.raw /mnt/sles/
+&prompt.user;ls -l /mnt/sles/
+total 112
+drwxr-xr-x 2 root root 4096 Nov 16 10:02 bin
+drwxr-xr-x 3 root root 4096 Nov 16 10:27 boot
+drwxr-xr-x 5 root root 4096 Nov 16 09:11 dev
+[...]
+drwxrwxrwt 14 root root 4096 Nov 24 09:50 tmp
+drwxr-xr-x 12 root root 4096 Nov 16 09:16 usr
+drwxr-xr-x 15 root root 4096 Nov 16 09:22 var
+
+
+
+
+ Copy one or more files onto the mounted partition and unmount it
+ when finished.
+
+&prompt.user;cp /etc/X11/xorg.conf /mnt/sles/root/tmp
+&prompt.user;ls -l /mnt/sles/root/tmp
+&prompt.user;umount /mnt/sles/
+
+
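+ Mounting disk image with kpartx
+
+ The second way is a sketch that assumes the kpartx tool
+ (shipped with multipath-tools) is installed; it sets up the
+ loop device and the partition mappings automatically.
+
+
+
+ Map the partitions of the disk image and mount the desired one (the
+ mapping output is illustrative):
+
+&prompt.user;kpartx -av /images/sles_base.raw
+add map loop0p1 (254:0): 0 1542177 linear 7:0 63
+add map loop0p2 (254:1): 0 6843690 linear 7:0 1542240
+&prompt.user;mount /dev/mapper/loop0p2 /mnt/sles/
+
+
+
+ When finished, unmount the partition and remove the mappings again:
+
+&prompt.user;umount /mnt/sles/
+&prompt.user;kpartx -d /images/sles_base.raw
+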
+
+ Do not write to images currently in use
+
+ Never mount a partition of an image of a running virtual machine in
+ a read-write mode. This could corrupt the
+ partition and break the whole &vmguest;.
+
+
+
+
+
+
diff --git a/references/qemu_host_installation.xml b/references/qemu_host_installation.xml
new file mode 100644
index 000000000..27f0ee237
--- /dev/null
+++ b/references/qemu_host_installation.xml
@@ -0,0 +1,674 @@
+
+
+ %entities;
+]>
+
+
+ Setting up a &kvm; &vmhost;
+
+
+
+ yes
+
+
+
+ This section documents how to set up and use &productname; &productnumber;
+ as a &qemu;-&kvm; based virtual machine host.
+
+
+ Resources
+
+ The virtual guest system needs the same hardware resources as if it were
+ installed on a physical machine. The more guests you plan to run on the
+ host system, the more hardware resources—CPU, disk, memory and
+ network—you need to add to the &vmhost;.
+
+
+
+ CPU support for virtualization
+
+
+ To run &kvm;, your CPU must support virtualization, and virtualization
+ needs to be enabled in BIOS. The file /proc/cpuinfo
+ includes information about your CPU features.
+
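+
+ For a quick check, search for the relevant CPU flags (a sketch;
+ vmx indicates Intel VT-x, svm
+ indicates AMD-V):
+
+&prompt.user;grep -E 'vmx|svm' /proc/cpuinfo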
+
+
+ To find out whether your system supports virtualization, see the section Architecture
+ Support in the article Virtualization Limits and Support.
+
+
+
+ Required software
+
+
+ The &kvm; host requires several packages to be installed. To install all
+ necessary packages, do the following:
+
+
+
+
+
+ Install the patterns-server-kvm_server and
+ patterns-server-kvm_tools patterns (see the example
+ command after this list).
+
+
+
+
+ Create a Network Bridge. If you do
+ not plan to dedicate an additional physical network card to your
+ virtual guests, a network bridge is the standard way to connect the guest
+ machines to the network.
+
+
+
+
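+ For example, a sketch of the installation step, using the pattern package
+ names above:
+
+&prompt.root;zypper install patterns-server-kvm_server patterns-server-kvm_tools
+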
+ After all the required packages are installed (and the new network setup is
+ activated), try to load the &kvm; kernel module relevant for your CPU
+ type—kvm_intel or
+ kvm_amd:
+
+&prompt.root;modprobe kvm_amd
+
+ Check if the module is loaded into memory:
+
+ &prompt.user;lsmod | grep kvm
+kvm_amd 237568 20
+kvm 1376256 17 kvm_amd
+
+ Now the &kvm; host is ready to serve &kvm; &vmguest;s.
+
+
+
+
+
+ &kvm; host-specific features
+
+
+ You can improve the performance of &kvm;-based &vmguest;s by letting them
+ fully use specific features of the &vmhost;'s hardware
+ (paravirtualization). This section introduces
+ techniques to make the guests access the physical host's hardware
+ directly—without the emulation layer—to make the most use of
+ it.
+
+
+
+
+ Examples included in this section assume basic knowledge of the
+ qemu-system-ARCH command
+ line options.
+
+
+
+
+ Using the host storage with virtio-scsi
+
+ virtio-scsi is an advanced storage stack for
+ &kvm;. It replaces the former virtio-blk stack
+ for SCSI device pass-through. It has several advantages over
+ virtio-blk:
+
+
+
+ Improved scalability
+
+
+ &kvm; guests have a limited number of PCI controllers, which
+ results in a limited number of attached devices.
+ virtio-scsi solves this limitation by
+ grouping multiple storage devices on a single controller. Each
+ device on a virtio-scsi controller is
+ represented as a logical unit, or LUN.
+
+
+
+
+ Standard command set
+
+
+ virtio-blk uses a small set of commands
+ that need to be known to both the
+ virtio-blk driver and the virtual
+ machine monitor, and so introducing a new command requires
+ updating both the driver and the monitor.
+
+
+ By comparison, virtio-scsi does not
+ define commands, but rather a transport protocol for these
+ commands following the industry-standard SCSI specification. This
+ approach is shared with other technologies, such as Fibre
+ Channel, ATAPI and USB devices.
+
+
+
+
+ Device naming
+
+
+ virtio-blk devices are presented inside
+ the guest as
+ /dev/vdX, which
+ is different from device names in physical systems and may cause
+ migration problems.
+
+
+ virtio-scsi keeps the device names
+ identical to those on physical systems, making the virtual
+ machines easily relocatable.
+
+
+
+
+ SCSI device pass-through
+
+
+ For virtual disks backed by a whole LUN on the host, it is
+ preferable for the guest to send SCSI commands directly to the
+ LUN (pass-through). This is limited in
+ virtio-blk, as guests need to use the
+ virtio-blk protocol instead of SCSI command pass-through, and,
+ moreover, it is not available for Windows guests.
+ virtio-scsi natively removes these
+ limitations.
+
+
+
+
+
+ virtio-scsi usage
+
+ &kvm; supports the SCSI pass-through feature with the
+ virtio-scsi-pci device:
+
+&prompt.root;qemu-system-x86_64 [...] \
+-device virtio-scsi-pci,id=scsi
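+
+ Building on this, a minimal sketch passes a whole host LUN through to the
+ guest on that controller (assuming /dev/sdb is the
+ backing LUN; the device node is hypothetical):
+
+&prompt.root;qemu-system-x86_64 [...] \
+-device virtio-scsi-pci,id=scsi \
+-drive file=/dev/sdb,if=none,format=raw,id=lun0 \
+-device scsi-block,drive=lun0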
+
+
+
+
+ Accelerated networking with vhost-net
+
+ The vhost-net module is used to accelerate
+ &kvm;'s paravirtualized network drivers. It provides better latency and
+ greater network throughput. Use the vhost-net driver
+ by starting the guest with the following example command line:
+
+&prompt.root;qemu-system-x86_64 [...] \
+-netdev tap,id=guest0,vhost=on,script=no \
+-net nic,model=virtio,netdev=guest0,macaddr=00:16:35:AF:94:4B
+
+ guest0 is an identification string of the
+ vhost-driven device.
+
+
+
+
+ Scaling network performance with multiqueue virtio-net
+
+ As the number of virtual CPUs increases in &vmguest;s, &qemu; offers a
+ way of improving the network performance using
+ multiqueue. Multiqueue virtio-net scales the
+ network performance by allowing &vmguest; virtual CPUs to transfer
+ packets in parallel. Multiqueue support is required on both the
+ &vmhost; and &vmguest; sides.
+
+
+ Performance benefit
+
+ The multiqueue virtio-net solution is most beneficial in the
+ following cases:
+
+
+
+
+ Network traffic packets are large.
+
+
+
+
+ &vmguest; has many connections active at the same time, mainly
+ between the guest systems, or between the guest and the host, or
+ between the guest and an external system.
+
+
+
+
+ The number of active queues is equal to the number of virtual
+ CPUs in the &vmguest;.
+
+
+
+
+
+
+ While multiqueue virtio-net increases the total network throughput,
+ it increases CPU consumption as it uses more of the virtual CPU's power.
+
+
+
+ How to enable multiqueue virtio-net
+
+ The following procedure lists important steps to enable the
+ multiqueue feature with qemu-system-ARCH. It
+ assumes that a tap network device with multiqueue capability
+ (supported since kernel version 3.8) is set up on the &vmhost;.
+
+
+
+ In qemu-system-ARCH, enable multiqueue for the
+ tap device:
+
+-netdev tap,vhost=on,queues=2*N
+
+ where N stands for the number of queue pairs.
+
+
+
+
+ In qemu-system-ARCH, enable multiqueue and
+ specify MSI-X (Message Signaled Interrupt) vectors for the
+ virtio-net-pci device:
+
+-device virtio-net-pci,mq=on,vectors=2*N+2
+
+ where the formula for the number of MSI-X vectors results from: N
+ vectors for TX (transmit) queues, N for RX (receive) queues, one
+ for configuration purposes, and one for possible VQ (virtqueue)
+ control.
+
+
+
+
+ In &vmguest;, enable multiqueue on the relevant network interface
+ (eth0 in this example):
+
+&prompt.sudo;ethtool -L eth0 combined 2*N
+
+
+
+ The resulting qemu-system-ARCH command line looks
+ similar to the following example:
+
+qemu-system-x86_64 [...] -netdev tap,id=guest0,queues=8,vhost=on \
+-device virtio-net-pci,netdev=guest0,mq=on,vectors=10
+
+ The id of the network device
+ (guest0) needs to be identical for both options.
+
+
+ Inside the running &vmguest;, specify the following command with
+ &rootuser; privileges:
+
+&prompt.sudo;ethtool -L eth0 combined 8
+
+ Now the guest system networking uses the multiqueue support from the
+ qemu-system-ARCH hypervisor.
+
+
+
+
+ VFIO: secure direct access to devices
+
+ Directly assigning a PCI device to a &vmguest; (PCI pass-through)
+ avoids performance issues by bypassing any emulation in
+ performance-critical paths. VFIO replaces the traditional &kvm;
+ &pciback; device assignment. A prerequisite for this feature is a
+ &vmhost; configuration as described in
+ .
+
+
+ To be able to assign a PCI device via VFIO to a &vmguest;, you need to
+ find out which IOMMU group it belongs to. The IOMMU
+ (input/output memory
+ management unit that connects a direct memory access-capable I/O bus to
+ the main memory) API supports the notion of groups. A group is a set of
+ devices that can be isolated from all other devices in the system.
+ Groups are therefore the unit of ownership used by VFIO.
+
+
+ Assigning a PCI device to a &vmguest; via VFIO
+
+
+ Identify the host PCI device to assign to the guest.
+
+&prompt.sudo;lspci -nn
+[...]
+00:10.0 Ethernet controller [0200]: Intel Corporation 82576 \
+Virtual Function [8086:10ca] (rev 01)
+[...]
+
+ Note down the device ID, 00:10.0 in this example,
+ and the vendor ID (8086:10ca).
+
+
+
+
+ Find the IOMMU group of this device:
+
+&prompt.sudo;readlink /sys/bus/pci/devices/0000\:00\:10.0/iommu_group
+../../../kernel/iommu_groups/20
+
+ The IOMMU group for this device is 20. Now you
+ can check the devices belonging to the same IOMMU group:
+
+&prompt.sudo;ls -l /sys/bus/pci/devices/0000\:01\:10.0/iommu_group/devices/
+[...] 0000:00:1e.0 -> ../../../../devices/pci0000:00/0000:00:1e.0
+[...] 0000:01:10.0 -> ../../../../devices/pci0000:00/0000:00:1e.0/0000:01:10.0
+[...] 0000:01:10.1 -> ../../../../devices/pci0000:00/0000:00:1e.0/0000:01:10.1
+
+
+
+ Unbind the device from the device driver:
+
+&prompt.sudo;echo "0000:01:10.0" > /sys/bus/pci/devices/0000\:01\:10.0/driver/unbind
+
+
+
+ Bind the device to the vfio-pci driver using the vendor ID from
+ step 1:
+
+&prompt.sudo;echo "8086 153a" > /sys/bus/pci/drivers/vfio-pci/new_id
+
+ A new device
+ /dev/vfio/IOMMU_GROUP
+ is created as a result, /dev/vfio/20 in this
+ case.
+
+
+
+
+ Change the ownership of the newly created device:
+
+&prompt.sudo;chown qemu.qemu /dev/vfio/DEVICE
+
+
+
+ Now run the &vmguest; with the PCI device assigned.
+
+&prompt.sudo;qemu-system-ARCH [...] -device
+ vfio-pci,host=00:10.0,id=ID
+
+
+
+ No hotplugging
+
+ As of &productname; &productnumber;, hotplugging of PCI devices
+ passed to a &vmguest; via VFIO is not supported.
+
+
+
+ You can find more detailed information on the VFIO
+ driver in the
+ /usr/src/linux/Documentation/vfio.txt file
+ (package kernel-source needs to be installed).
+
+
+
+
+ VirtFS: sharing directories between host and guests
+
+ &vmguest;s normally run in a separate computing space—they are
+ provided their own memory range, dedicated CPUs, and file system space.
+ The ability to share parts of the &vmhost;'s file system makes the
+ virtualization environment more flexible by simplifying mutual data
+ exchange. Network file systems, such as CIFS and NFS, have been the
+ traditional way of sharing directories. But as they are not
+ specifically designed for virtualization purposes, they suffer from
+ major performance and feature issues.
+
+
+
+ SELinux Requirement: For security_model=mapped,
+ configure SELinux context:
+
+ &prompt.root; semanage fcontext -a -t virtiofsd_t "/tmp(/.*)?"
+&prompt.root; restorecon -Rv /tmp
+
+
+
+ Host Configuration
+
+ There is nothing special to do on the host side; just make sure that
+ virtiofsd is installed. The &vmguest;'s libvirt XML
+ file should have a configuration like the following:
+
+
+<filesystem type="virtiofs" accessmode="mapped"/>
+ <driver="virtiofs"/>
+ <source dir="/tmp"/>
+ <target dir="host_tmp"/>
+ <alias name="fs0"/>
+ <address type="pci" domain="0x0000" bus="0x01" slot="0x00" function="0x0"/>
+</filesystem>
+
+ 9p is deprecated
+
+ The 9p protocol is a legacy solution with critical flaws. Moreover,
+ it incurs ~30-50% higher CPU overhead than virtiofs
+ for sequential I/O, due to constant context switching between user and kernel space.
+
+
+
+
+ Access Mode Options for virtiofs
+
+ The accessmode attribute in the <filesystem> element
+ defines how guest file permissions map to host permissions. Only two values are valid:
+
+
+ virtiofs Access Mode Options
+
+
+
+
+
+
+ accessmode
+ Description
+ Security Implications
+
+
+
+
+ mapped
+
+
+ The default mode. Maps guest UIDs/GIDs to host UIDs/GIDs using a translation table.
+ Guest files appear as if owned by a dedicated "virtiofs" user on the host (typically UID 1000).
+
+
+ Example: Guest user uid=1000 writes to host /tmp →
+ Host file appears as owned by virtiofsd user (not the guest user).
+
+
+
+
+ Recommended for all environments.
+
+
+ Prevents guest users from directly accessing host user accounts.
+
+
+ Does not require matching host users.
+
+
+
+
+ passthrough
+
+
+ Guest UIDs/GIDs are used directly on the host. The guest must have matching users on the host.
+
+
+ Example: Guest user uid=1000 writes to host /tmp →
+ Host file appears as owned by the user with uid=1000.
+
+
+
+
+ Only for trusted guests (e.g., same-tenant cloud environments).
+
+
+ Requires matching host users (e.g., host must have useradd -u 1000 guestuser).
+
+
+ Security risk: Compromised guest can directly access host user accounts.
+
+
+
+
+ none
+
+
+ Invalid value (common documentation error).
+
+
+ virtiofsd rejects this option with error:
+ security_model=none is not supported.
+
+
+ Always use mapped (default) or passthrough.
+
+
+
+
+ Causes immediate configuration failure.
+
+
+
+
+
+
+
+
+ Key Configuration Rule:
+ accessmode='mapped' must match the host's
+ security_model=mapped in virtiofsd.
+ Mismatched modes cause mount failures with errors like:
+ Failed to set security context: Operation not permitted.
+
+
+
+
+
+ Guest Configuration
+
+ On the guest, load the kernel module and mount the file system:
+
+
+ Guest Mount Command
+ The virtiofs module should be loaded automatically. If it is not, load it manually:
+ &prompt.root; modprobe virtiofs
+ Now you can mount the target dir on your &vmguest;:
+ &prompt.root; mount -t virtiofs -o dax host_tmp /mnt/hosttmp
+
+ Options:
+
+
+
+ virtiofs
+
+
+ dax: Enables direct access for performance (recommended). dax cannot be used with the Btrfs file system.
+
+
+ host_tmp: the target directory name from the &vmguest; configuration.
+
+
+
+
+
+ Persistent virtiofs mounts across reboots
+ Add this line to /etc/fstab:
+ host_tmp /mnt/hosttmp virtiofs rw,nofail 0 0
+
+
+ Troubleshooting Common Issues
+
+
+
+ Guest Kernel Check: verify the module virtiofs is loaded:
+
+ &prompt.root; lsmod | grep virtiofs
+
+
+ Permission denied: Check SELinux context (see )
+
+
+ Mount fails with 9p: Verify you used -t virtiofs (not 9p)
+
+
+ Guest writes not syncing: Add cache=none to mount options
+
+
+
+
+
+
+ KSM: sharing memory pages between guests
+
+ Kernel Same Page Merging (KSM) is a
+ Linux kernel feature that merges identical memory pages from multiple
+ running processes into one memory region. Because &kvm; guests run as
+ processes under Linux, KSM provides
+ the memory overcommit feature to hypervisors for more efficient use of
+ memory. Therefore, if you need to run multiple virtual machines on a
+ host with limited memory, KSM may be
+ helpful to you.
+
+
+ KSM stores its status information in
+ the files under the /sys/kernel/mm/ksm directory:
+
+&prompt.user;ls -1 /sys/kernel/mm/ksm
+full_scans
+merge_across_nodes
+pages_shared
+pages_sharing
+pages_to_scan
+pages_unshared
+pages_volatile
+run
+sleep_millisecs
+
+ For more information on the meaning of the
+ /sys/kernel/mm/ksm/* files, see
+ /usr/src/linux/Documentation/vm/ksm.txt (package
+ kernel-source).
+
+
+ To use KSM, do the following.
+
+
+
+
+ Although &productnameshort; includes
+ KSM support in the kernel, it is
+ disabled by default. To enable it, run the following command:
+
+&prompt.root;echo 1 > /sys/kernel/mm/ksm/run
+
+
+
+ Now run several &vmguest;s under &kvm; and inspect the content of
+ files pages_sharing and
+ pages_shared, for example:
+
+&prompt.user;while [ 1 ]; do cat /sys/kernel/mm/ksm/pages_shared; sleep 1; done
+13522
+13523
+13519
+13518
+13520
+13520
+13528
+
+
+
+
+
diff --git a/references/qemu_monitor.xml b/references/qemu_monitor.xml
new file mode 100644
index 000000000..6c180dda8
--- /dev/null
+++ b/references/qemu_monitor.xml
@@ -0,0 +1,1110 @@
+
+
+ %entities;
+]>
+
+
+ Virtual machine administration using &qemu; monitor
+
+
+
+ yes
+
+
+
+ 2024-06-27
+
+
+
+
+
+
+
+ When a virtual machine is invoked by the &qemusystemarch; command, for
+ example qemu-system-x86_64, a monitor console is
+ provided for performing interaction with the user. Using the commands
+ available in the monitor console, it is possible to inspect the running
+ operating system, change removable media, take screenshots or audio grabs
+ and control other aspects of the virtual machine.
+
+
+
+ The following sections list selected useful &qemu; monitor commands and
+ their purpose. To get the full list, enter help in the
+ &qemu; monitor command line.
+
+
+
+ Accessing monitor console
+
+
+ No monitor console for &libvirt;
+
+ You can access the monitor console only if you started the virtual
+ machine directly with the &qemusystemarch; command and are viewing its
+ graphical output in a built-in &qemu; window.
+
+
+ If you started the virtual machine with &libvirt;, for example, using
+ virt-manager, and are viewing its output via VNC or
+ Spice sessions, you cannot access the monitor console directly. You
+ can, however, send the monitor command to the virtual machine via
+ &virsh;:
+
+&prompt.root;virsh qemu-monitor-command COMMAND
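+
+ For example, to query the run state of a domain named
+ vm_guest1 (the domain name is a placeholder) in HMP mode:
+
+&prompt.root;virsh qemu-monitor-command --hmp vm_guest1 "info status"
+VM status: running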
+
+
+
+ The way you access the monitor console depends on which display device
+ you use to view the output of a virtual machine. Find more details about
+ displays in .
+ For example, to view the monitor while the
+ option is in use, press
+ 2. Similarly, when
+ the option is in use, you can switch to the
+ monitor console by pressing the following key combination:
+ AC.
+
+
+
+ To get help while using the console, use help or
+ ?. To get help for a specific command, use
+ help COMMAND.
+
+
+
+ Getting information about the guest system
+
+
+ To get information about the guest system, use info.
+ If used without any option, the list of possible options is printed.
+ Options determine which part of the system is analyzed:
+
+
+
+
+ info version
+
+
+ Shows the version of &qemu;.
+
+
+
+
+ info commands
+
+
+ Lists available QMP commands.
+
+
+
+
+ info network
+
+
+ Shows the network state.
+
+
+
+
+ info chardev
+
+
+ Shows the character devices.
+
+
+
+
+ info block
+
+
+ Information about block devices, such as hard disks, floppy drives,
+ or CD-ROMs.
+
+
+
+
+ info blockstats
+
+
+ Read and write statistics on block devices.
+
+
+
+
+ info registers
+
+
+ Shows the CPU registers.
+
+
+
+
+ info cpus
+
+
+ Shows information about available CPUs.
+
+
+
+
+ info history
+
+
+ Shows the command line history.
+
+
+
+
+ info irq
+
+
+ Shows the interrupt statistics.
+
+
+
+
+ info pic
+
+
+ Shows the i8259 (PIC) state.
+
+
+
+
+ info pci
+
+
+ Shows the PCI information.
+
+
+
+
+ info tlb
+
+
+ Shows virtual to physical memory mappings.
+
+
+
+
+ info mem
+
+
+ Shows the active virtual memory mappings.
+
+
+
+
+ info jit
+
+
+ Shows dynamic compiler information.
+
+
+
+
+ info kvm
+
+
+ Shows the KVM information.
+
+
+
+
+ info numa
+
+
+ Shows the NUMA information.
+
+
+
+
+ info usb
+
+
+ Shows the guest USB devices.
+
+
+
+
+ info usbhost
+
+
+ Shows the host USB devices.
+
+
+
+
+ info profile
+
+
+ Shows the profiling information.
+
+
+
+
+ info capture
+
+
+ Shows the capture (audio grab) information.
+
+
+
+
+ info snapshots
+
+
+ Shows the currently saved virtual machine snapshots.
+
+
+
+
+ info status
+
+
+ Shows the current virtual machine status.
+
+
+
+
+ info mice
+
+
+ Shows which guest mice are receiving events.
+
+
+
+
+ info vnc
+
+
+ Shows the VNC server status.
+
+
+
+
+ info name
+
+
+ Shows the current virtual machine name.
+
+
+
+
+ info uuid
+
+
+ Shows the current virtual machine UUID.
+
+
+
+
+ info usernet
+
+
+ Shows the user network stack connection states.
+
+
+
+
+ info migrate
+
+
+ Shows the migration status.
+
+
+
+
+ info balloon
+
+
+ Shows the balloon device information.
+
+
+
+
+ info qtree
+
+
+ Shows the device tree.
+
+
+
+
+ info qdm
+
+
+ Shows the qdev device model list.
+
+
+
+
+ info roms
+
+
+ Shows the ROMs.
+
+
+
+
+ info migrate_cache_size
+
+
+ Shows the current migration xbzrle (Xor Based Zero Run
+ Length Encoding) cache size.
+
+
+
+
+ info migrate_capabilities
+
+
+ Shows the status of the multiple migration capabilities, such as
+ xbzrle compression.
+
+
+
+
+ info mtree
+
+
+ Shows the &vmguest; memory hierarchy.
+
+
+
+
+ info trace-events
+
+
+ Shows available trace-events and their status.
+
+
+
+
+
+
+ Changing VNC password
+
+
+ To change the VNC password, use the change vnc
+ password command and enter the new password:
+
+
+
+(qemu) change vnc password
+Password: ********
+(qemu)
+
+
+
+ Managing devices
+
+
+ To add a new disk while the guest is running (hotplug), use the
+ drive_add and device_add commands.
+ First define a new drive to be added as a device to bus 0:
+
+
+(qemu) drive_add 0 if=none,file=/tmp/test.img,format=raw,id=disk1
+OK
+
+
+ You can confirm your new device by querying the block subsystem:
+
+
+(qemu) info block
+[...]
+disk1: removable=1 locked=0 tray-open=0 file=/tmp/test.img ro=0 drv=raw \
+encrypted=0 bps=0 bps_rd=0 bps_wr=0 iops=0 iops_rd=0 iops_wr=0
+
+
+ After the new drive is defined, it needs to be connected to a device so
+ that the guest can see it. The typical device would be a
+ virtio-blk-pci or scsi-disk. To get
+ the full list of available values, run:
+
+
+(qemu) device_add ?
+name "VGA", bus PCI
+name "usb-storage", bus usb-bus
+[...]
+name "virtio-blk-pci", bus virtio-bus
+
+
+ Now add the device
+
+
+(qemu) device_add virtio-blk-pci,drive=disk1,id=myvirtio1
+
+
+ and confirm with
+
+
+(qemu) info pci
+[...]
+Bus 0, device 4, function 0:
+ SCSI controller: PCI device 1af4:1001
+ IRQ 0.
+ BAR0: I/O at 0xffffffffffffffff [0x003e].
+ BAR1: 32 bit memory at 0xffffffffffffffff [0x00000ffe].
+ id "myvirtio1"
+
+
+
+ Devices added with the device_add command can be
+ removed from the guest with device_del. Enter
+ help device_del on the &qemu; monitor command line
+ for more information.
+
+
+
+
+ To release the device or file connected to the removable media device,
+ use the eject DEVICE
+ command. Use the optional -f flag to force ejection.
+
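+
+ For example, to force-eject the medium from the first IDE CD-ROM drive:
+
+(qemu) eject -f ide1-cd0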
+
+
+ To change removable media (like CD-ROMs), use the
+ change DEVICE command. The
+ name of the removable media can be determined using the info
+ block command:
+
+
+
+(qemu) info block
+ide1-cd0: type=cdrom removable=1 locked=0 file=/dev/sr0 ro=1 drv=host_device
+(qemu) change ide1-cd0 /path/to/image
+
+
+
+ Controlling keyboard and mouse
+
+
+ It is possible to use the monitor console to emulate keyboard and mouse
+ input if necessary. For example, if your graphical user interface
+ intercepts certain key combinations at low level (such as
+ F1
+ in the X Window System), you can still enter them using the
+ sendkey KEYS command:
+
+
+sendkey ctrl-alt-f1
+
+
+ To list the key names used in the KEYS option,
+ enter sendkey and press .
+
+
+
+ To control the mouse, the following commands can be used (a combined example follows the list):
+
+
+
+
+ mouse_move DX DY [DZ]
+
+
+ Move the active mouse pointer to the specified coordinates dx, dy
+ with the optional scroll axis dz.
+
+
+
+
+ mouse_button VAL
+
+
+ Change the state of the mouse buttons (1=left, 2=middle, 4=right).
+
+
+
+
+ mouse_set INDEX
+
+
+ Set which mouse device receives events. Device index numbers can be
+ obtained with the info mice command.
+
+
+
+
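+
+ For example, to route events to the second mouse device, then press and
+ release the left button (the device index is illustrative; check
+ info mice for the real one):
+
+(qemu) mouse_set 1
+(qemu) mouse_button 1
+(qemu) mouse_button 0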
+
+
+ Changing available memory
+
+
+ If the virtual machine was started with the option (the paravirtualized balloon device is therefore
+ enabled), you can change the available memory dynamically. For more
+ information about enabling the balloon device, see
+ .
+
+
+
+ To get information about the balloon device in the monitor console and to
+ determine whether the device is enabled, use the info
+ balloon command:
+
+
+(qemu) info balloon
+
+
+ If the balloon device is enabled, use the balloon
+ MEMORY_IN_MB command to set the requested
+ amount of memory:
+
+
+(qemu) balloon 400
+
+
+ Dumping virtual machine memory
+
+
+ To save the content of the virtual machine memory to a disk or console
+ output, use the following commands (a short example follows the list):
+
+
+
+
+ memsave ADDR SIZE FILENAME
+
+
+ Saves virtual memory dump starting at
+ ADDR of size
+ SIZE to file
+ FILENAME.
+
+
+
+
+ pmemsave ADDR SIZE FILENAME
+
+
+ Saves physical memory dump starting at
+ ADDR of size
+ SIZE to file
+ FILENAME.
+
+
+
+
+ x /FMT ADDR
+
+
+ Makes a virtual memory dump starting at address
+ ADDR and formatted according to the
+ FMT string. The
+ FMT string consists of three parameters
+ COUNT FORMAT SIZE:
+
+
+ The COUNT parameter is the number of
+ items to be dumped.
+
+
+ The FORMAT can be x
+ (hex), d (signed decimal), u
+ (unsigned decimal), o (octal),
+ c (char) or i (assembly
+ instruction).
+
+
+ The SIZE parameter can be
+ b (8 bits), h (16 bits),
+ w (32 bits) or g (64 bits).
+ On x86, h or w can be
+ specified with the i format to respectively
+ select 16 or 32-bit code instruction size.
+
+
+
+
+ xp /FMT ADDR
+
+
+ Makes a physical memory dump starting at address
+ ADDR and formatted according to the
+ FMT string. The
+ FMT string consists of three parameters
+ COUNT FORMAT SIZE:
+
+
+ The COUNT parameter is the number of
+ items to be dumped.
+
+
+ The FORMAT can be x
+ (hex), d (signed decimal), u
+ (unsigned decimal), o (octal),
+ c (char) or i (asm
+ instruction).
+
+
+ The SIZE parameter can be
+ b (8 bits), h (16 bits),
+ w (32 bits) or g (64 bits).
+ On x86, h or w can be
+ specified with the i format to respectively
+ select 16 or 32-bit code instruction size.
+
+
+
+
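+
+ For example, to dump the first 4 KB of guest virtual memory to a file,
+ and to examine eight bytes at the start of the guest's physical memory
+ in hex (the addresses and sizes are illustrative):
+
+(qemu) memsave 0 4096 /tmp/guest-mem.bin
+(qemu) xp /8xb 0x0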
+
+
+ Managing virtual machine snapshots
+
+
+ Managing snapshots in QEMU monitor is not supported by &suse;
+ yet. The information found in this section may be helpful in specific
+ cases.
+
+
+
+ Virtual machine snapshots are snapshots of the complete
+ virtual machine including the state of CPU, RAM and the content of all
+ writable disks. To use virtual machine snapshots, you must have at least
+ one non-removable and writable block device using the
+ qcow2 disk image format.
+
+
+
+ Snapshots are helpful when you need to save your virtual machine in a
+ particular state. For example, after you have configured network services
+ on a virtualized server and want to quickly start the virtual machine in
+ the same state that was saved last. You can also create a snapshot after
+ the virtual machine has been powered off to create a backup state before
+ you try something experimental and make &vmguest; unstable. This section
+ introduces the former case, while the latter is described in
+ .
+
+
+
+ The following commands are available for managing snapshots in &qemu;
+ monitor (a combined example follows the list):
+
+
+
+
+ savevm NAME
+
+
+ Creates a new virtual machine snapshot under the tag
+ NAME or replaces an existing snapshot.
+
+
+
+
+ loadvm NAME
+
+
+ Loads a virtual machine snapshot tagged
+ NAME.
+
+
+
+
+ delvm NAME
+
+
+ Deletes the virtual machine snapshot tagged NAME.
+
+
+
+
+ info snapshots
+
+
+ Prints information about available snapshots.
+
+(qemu) info snapshots
+Snapshot list:
+ID TAG VM SIZE DATE VM CLOCK
+1 booting 4.4M 2013-11-22 10:51:10 00:00:20.476
+2 booted 184M 2013-11-22 10:53:03 00:02:05.394
+3 logged_in 273M 2013-11-22 11:00:25 00:04:34.843
+4 ff_and_term_running 372M 2013-11-22 11:12:27 00:08:44.965
+
+
+
+ Unique auto-incremented identification number of the snapshot.
+
+
+
+
+ Unique description string of the snapshot. It is meant as a
+ human-readable version of the ID.
+
+
+
+
+ The disk space occupied by the snapshot. The more memory is
+ consumed by running applications, the bigger the snapshot is.
+
+
+
+
+ Time and date the snapshot was created.
+
+
+
+
+ The current state of the virtual machine's clock.
+
+
+
+
+
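+
+ For example, to create a snapshot, restore it later and finally delete
+ it (the tag backup1 is a placeholder):
+
+(qemu) savevm backup1
+(qemu) loadvm backup1
+(qemu) delvm backup1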
+
+
+
+ Suspending and resuming virtual machine execution
+
+
+ The following commands are available for suspending and resuming virtual
+ machines:
+
+
+
+
+ stop
+
+
+ Suspends the execution of the virtual machine.
+
+
+
+
+ cont
+
+
+ Resumes the execution of the virtual machine.
+
+
+
+
+ system_reset
+
+
+ Resets the virtual machine. The effect is similar to the reset
+ button on a physical machine. This may leave the file system in an
+ unclean state.
+
+
+
+
+ system_powerdown
+
+
+ Sends an ACPI shutdown request to the
+ machine. The effect is similar to the power button on a physical
+ machine.
+
+
+
+
+ q or quit
+
+
+ Terminates &qemu; immediately.
+
+
+
+
+
+
+ Live migration
+
+
+ The live migration process allows transferring any virtual machine from
+ one host system to another without any interruption in
+ availability. It is possible to change hosts permanently or only for the
+ duration of a maintenance window.
+
+
+
+ The requirements for live migration:
+
+
+
+
+
+ The &libvirt; requirements are applicable.
+
+
+
+
+
+ Live migration is only possible between &vmhost;s with the same CPU
+ features.
+
+
+
+
+ interface,
+ feature, and the
+ command line option are not compatible
+ with migration.
+
+
+
+
+ The guest on the source and destination hosts must be started in the
+ same way.
+
+
+
+
+ The qemu command line option should not be
+ used for migration (this qemu command line
+ option is not supported).
+
+
+
+
+
+ Support status
+
+ The postcopy mode is not yet supported in
+ &productname;. It is released as a technology preview only. For more
+ information about postcopy, see
+ .
+
+
+
+
+ More recommendations can be found at the following Web site:
+
+
+
+
+ The live migration process has the following steps (a minimal command sketch follows the list):
+
+
+
+
+
+ The virtual machine instance is running on the source host.
+
+
+
+
+ The virtual machine is started on the destination host in the frozen
+ listening mode. The parameters used are the same as on the source
+ host plus the -incoming tcp:IP:PORT
+ parameter, where IP specifies the IP
+ address and PORT specifies the port for
+ listening to the incoming migration. If 0 is set as IP address, the
+ virtual machine listens on all interfaces.
+
+
+
+
+ On the source host, switch to the monitor console and use the
+ migrate -d tcp:
+ DESTINATION_IP:PORT
+ command to initiate the migration.
+
+
+
+
+ To determine the state of the migration, use the info
+ migrate command in the monitor console on the source host.
+
+
+
+
+ To cancel the migration, use the migrate_cancel
+ command in the monitor console on the source host.
+
+
+
+
+ To set the maximum tolerable downtime for migration in seconds, use
+ the migrate_set_downtime
+ NUMBER_OF_SECONDS command.
+
+
+
+
+ To set the maximum speed for migration in bytes per second, use the
+ migrate_set_speed
+ BYTES_PER_SECOND command.
+
+
+
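+
+ A minimal sketch of the whole flow, assuming the destination host is
+ reachable as 192.168.1.20 and should listen on TCP port 4444 (both
+ values are illustrative):
+
+# (on the destination host:)
+&prompt.sudo;qemu-system-x86_64 [...] -incoming tcp:0:4444
+# (on the source host, in the &qemu; monitor:)
+(qemu) migrate -d tcp:192.168.1.20:4444
+(qemu) info migrate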
+
+
+ QMP - &qemu; machine protocol
+
+
+ QMP is a JSON-based protocol that allows applications—such as
+ &libvirt;—to communicate with a running &qemu; instance. There are
+ several ways you can access the &qemu; monitor using QMP commands.
+
+
+
+ Access QMP via standard input/output
+
+ The most flexible way to use QMP is by specifying the
+ option. The following example creates a QMP
+ instance using standard input/output. In the following examples,
+ -> marks lines with commands sent from client to
+ the running &qemu; instance, while <- marks lines
+ with the output returned from &qemu;.
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-chardev stdio,id=mon0 \
+-mon chardev=mon0,mode=control,pretty=on
+
+<- {
+ "QMP": {
+ "version": {
+ "qemu": {
+ "micro": 0,
+ "minor": 0,
+ "major": 2
+ },
+ "package": ""
+ },
+ "capabilities": [
+ ]
+ }
+}
+
+ When a new QMP connection is established, QMP sends its greeting
+ message and enters capabilities negotiation mode. In this mode, only
+ the qmp_capabilities command works. To exit
+ capabilities negotiation mode and enter command mode, the
+ qmp_capabilities command must be issued first:
+
+-> { "execute": "qmp_capabilities" }
+<- {
+ "return": {
+ }
+}
+
+ "return": {} is QMP's success response.
+
+
+ QMP's commands can have arguments. For example, to eject a CD-ROM drive,
+ enter the following:
+
+->{ "execute": "eject", "arguments": { "device": "ide1-cd0" } }
+<- {
+ "timestamp": {
+ "seconds": 1410353381,
+ "microseconds": 763480
+ },
+ "event": "DEVICE_TRAY_MOVED",
+ "data": {
+ "device": "ide1-cd0",
+ "tray-open": true
+ }
+}
+{
+ "return": {
+ }
+}
+
+
+
+ Access QMP via telnet
+
+ Instead of the standard input/output, you can connect the QMP interface
+ to a network socket and communicate with it via a specified port:
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-chardev socket,id=mon0,host=localhost,port=4444,server,nowait \
+-mon chardev=mon0,mode=control,pretty=on
+
+ And then run telnet to connect to port 4444:
+
+&prompt.user;telnet localhost 4444
+Trying ::1...
+Connected to localhost.
+Escape character is '^]'.
+<- {
+ "QMP": {
+ "version": {
+ "qemu": {
+ "micro": 0,
+ "minor": 0,
+ "major": 2
+ },
+ "package": ""
+ },
+ "capabilities": [
+ ]
+ }
+}
+
+ You can create several monitor interfaces at the same time. The
+ following example creates one HMP instance—human monitor which
+ understands normal &qemu; monitor commands—on the
+ standard input/output, and one QMP instance on localhost port 4444:
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-chardev stdio,id=mon0 -mon chardev=mon0,mode=readline \
+-chardev socket,id=mon1,host=localhost,port=4444,server,nowait \
+ -mon chardev=mon1,mode=control,pretty=on
+
+
+
+ Access QMP via Unix socket
+
+ Invoke &qemu; using the option, and create a Unix
+ socket:
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-qmp unix:/tmp/qmp-sock,server --monitor stdio
+
+QEMU waiting for connection on: unix:/tmp/qmp-sock,server
+
+ To communicate with the &qemu; instance via the
+ /tmp/qmp-sock socket, use nc (see
+ man 1 nc for more information) from another terminal
+ on the same host:
+
+&prompt.sudo;nc -U /tmp/qmp-sock
+<- {"QMP": {"version": {"qemu": {"micro": 0, "minor": 0, "major": 2} [...]
+
+
+
+ Access QMP via &libvirt;'s virsh command
+
+ If you run your virtual machines under &libvirt;, you can communicate with its
+ running guests by running the virsh
+ qemu-monitor-command:
+
+&prompt.sudo;virsh qemu-monitor-command vm_guest1 \
+--pretty '{"execute":"query-kvm"}'
+<- {
+ "return": {
+ "enabled": true,
+ "present": true
+ },
+ "id": "libvirt-8"
+}
+
+ In the above example, we ran the simple command
+ query-kvm, which checks whether the host is capable of
+ running &kvm; and whether &kvm; is enabled.
+
+
+ Generating human-readable output
+
+ To use the standard human-readable output format of &qemu; instead of
+ the JSON format, use the option:
+
+&prompt.sudo;virsh qemu-monitor-command vm_guest1 --hmp "query-kvm"
+
+
+
+
diff --git a/references/qemu_running_vms_qemukvm.xml b/references/qemu_running_vms_qemukvm.xml
new file mode 100644
index 000000000..5edfb1ab2
--- /dev/null
+++ b/references/qemu_running_vms_qemukvm.xml
@@ -0,0 +1,1979 @@
+
+
+ %entities;
+]>
+
+
+ Running virtual machines with qemu-system-ARCH
+
+
+
+ yes
+
+
+
+ 2024-06-27
+
+
+
+
+
+
+
+ Once you have a virtual disk image ready (for more information on disk
+ images, see ), you can
+ start the related virtual machine.
+ introduced simple commands
+ to install and run a &vmguest;. This article focuses on a more detailed
+ explanation of qemu-system-ARCH usage, and shows
+ solutions for more specific tasks. For a complete list of
+ qemu-system-ARCH's options, see its man page
+ (man 1 qemu).
+
+
+ Basic qemu-system-ARCH invocation
+
+
+ The qemu-system-ARCH command uses the following
+ syntax:
+
+
+qemu-system-ARCH OPTIONS -drive file=DISK_IMAGE
+
+
+
+
+ qemu-system-ARCH understands many options. Most of
+ them define parameters of the emulated hardware, while others affect
+ more general emulator behavior. If you do not supply any options,
+ default values are used, and you need to supply the path to a disk
+ image to be run.
+
+
+
+
+ Path to the disk image holding the guest system you want to
+ virtualize. qemu-system-ARCH supports many image
+ formats. Use qemu-img to
+ list them.
+
+
+
+
+
+ &aarch64; architecture
+
+ &kvm; support is available only for 64-bit Arm® architecture
+ (&aarch64;). Running &qemu; on the &aarch64; architecture requires you
+ to specify:
+
+
+
+
+ A machine type designed for &qemu; Arm® virtual machines using the
+ option.
+
+
+
+
+ A firmware image file using the option.
+
+
+ You can specify the firmware image files alternatively using the
+ options, for example:
+
+
+-drive file=/usr/share/edk2/aarch64/QEMU_EFI-pflash.raw,if=pflash,format=raw
+-drive file=/var/lib/libvirt/qemu/nvram/opensuse_VARS.fd,if=pflash,format=raw
+
+
+
+
+ A CPU of the &vmhost; using the option
+ (default is ).
+
+
+
+
+ The same Generic Interrupt Controller (GIC) version as the host
+ using the option
+ (default is ).
+
+
+
+
+ If a graphic mode is needed, a graphic device of type
+ virtio-gpu-pci.
+
+
+
+
+ For example:
+
+
+&prompt.sudo;qemu-system-aarch64 [...] \
+ -bios /usr/share/qemu/qemu-uefi-aarch64.bin \
+ -cpu host \
+ -device virtio-gpu-pci \
+ -machine virt,accel=kvm,gic-version=host
+
+
+
+
+ General qemu-system-ARCH options
+
+
+ This section introduces general qemu-system-ARCH
+ options and options related to the basic emulated hardware, such as the
+ virtual machine's processor, memory, model type, or time processing
+ methods.
+
+
+
+
+ -name NAME_OF_GUEST
+
+
+ Specifies the name of the running guest system. The name is
+ displayed in the window caption and used for the VNC server.
+
+
+
+
+ -boot OPTIONS
+
+
+ Specifies the order in which the defined drives are booted. Drives
+ are represented by letters, where a and
+ b stand for the floppy drives 1 and 2,
+ c stands for the first hard disk,
+ d stands for the first CD-ROM drive, and
+ n to p stand for Ether-boot
+ network adapters.
+
+
+ For example, qemu-system-ARCH [...] -boot
+ order=ndc first tries to boot from the network, then from
+ the first CD-ROM drive, and finally from the first hard disk.
+
+
+
+
+ -pidfile FILENAME
+
+
+ Stores the &qemu;'s process identification number (PID) in a file.
+ This is useful if you run &qemu; from a script.
+
+
+
+
+ -nodefaults
+
+
+ By default &qemu; creates basic virtual devices even if you do not
+ specify them on the command line. This option turns this feature
+ off, and you must specify every single device manually, including
+ graphical and network cards, parallel or serial ports, or virtual
+ consoles. Even the &qemu; monitor is not attached by default.
+
+
+
+
+ -daemonize
+
+
+ Daemonizes the &qemu; process after it is started.
+ &qemu; detaches from the standard input and standard output after
+ it is ready to receive connections on any of its devices.
+
+
+
+
+
+
+ SeaBIOS BIOS implementation
+
+ SeaBIOS is the default BIOS used. It can boot from USB devices and
+ any drive (CD-ROM, floppy or a hard disk). It has USB mouse and keyboard support
+ and supports multiple VGA cards. For more information about SeaBIOS,
+ refer to the SeaBIOS
+ Website.
+
+
+
+
+ Basic virtual hardware
+
+
+ Machine type
+
+ You can specify the type of the emulated machine. Run
+ qemu-system-ARCH -M help to view a list of
+ supported machine types.
+
+
+ ISA-PC
+
+ The machine type isapc: ISA-only-PC is
+ unsupported.
+
+
+
+
+ CPU model
+
+ To specify the type of the processor (CPU) model, run
+ qemu-system-ARCH -cpu
+ MODEL. Use qemu-system-ARCH -cpu
+ help to view a list of supported CPU models.
+
+
+
+ Other basic options
+
+ The following is a list of the most commonly used options when launching
+ qemu from the command line. To see all available
+ options, refer to the qemu-doc man page.
+
+
+
+ -m MEGABYTES
+
+
+ Specifies how many megabytes are used for the virtual RAM size.
+
+
+
+
+ -balloon virtio
+
+
+ Specifies a paravirtualized device to dynamically change the
+ amount of virtual RAM assigned to &vmguest;. The top limit is
+ the amount of memory specified with -m.
+
+
+
+
+ -smp NUMBER_OF_CPUS
+
+
+ Specifies how many CPUs to emulate. &qemu; supports up to 255
+ CPUs on the PC platform (up to 64 when KVM acceleration is used).
+ This option also takes other CPU-related parameters, such as
+ number of sockets, number of
+ cores per socket, or number of
+ threads per core (see the sketch after this list).
+
+
+
+
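+
+ For example, four virtual CPUs arranged as two sockets with two cores
+ each could be specified as follows (the values are illustrative):
+
+-smp 4,sockets=2,cores=2,threads=1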
+
+ The following is an example of a working
+ qemu-system-ARCH command line:
+
+
+&prompt.sudo;qemu-system-x86_64 \
+ -name "SLES &productnumber;" \
+ -M pc-i440fx-2.7 -m 512 \
+ -machine accel=kvm -cpu kvm64 -smp 2 \
+ -drive format=raw,file=/images/sles.raw
+
+
+ &qemu; window with SLES as &vmguest;
+
+
+
+
+
+
+
+
+
+
+
+ -no-acpi
+
+
+ Disables support.
+
+
+
+
+ -S
+
+
+ &qemu; starts with the CPU stopped. To start the CPU, enter
+ c in the &qemu; monitor. For more information,
+ see .
+
+
+
+
+
+
+
+
+ Storing and reading configuration of virtual devices
+
+
+ -readconfig CFG_FILE
+
+
+ Instead of entering the device configuration options on the
+ command line each time you want to run &vmguest;,
+ qemu-system-ARCH can read it from a file that
+ was either previously saved with -writeconfig
+ or edited manually.
+
+
+
+
+ -writeconfig CFG_FILE
+
+
+ Dumps the current virtual machine's device configuration to a
+ text file. It can be consequently re-used with the
+ -readconfig option.
+
+
+&prompt.sudo;qemu-system-x86_64 -name "SLES &productnumber;" \
+ -machine accel=kvm -M pc-i440fx-2.7 -m 512 -cpu kvm64 \
+ -smp 2 /images/sles.raw -writeconfig /images/sles.cfg
+(exited)
+&prompt.user;cat /images/sles.cfg
+# qemu config file
+
+[drive]
+ index = "0"
+ media = "disk"
+ file = "/images/sles_base.raw"
+
+
+ This way, you can effectively manage the device configuration of
+ your virtual machines.
+
+
+
+
+
+
+
+ Guest real-time clock
+
+
+ -rtc OPTIONS
+
+
+ Specifies the way the RTC is handled inside a &vmguest;. By
+ default, the clock of the guest is derived from that of the host
+ system. Therefore, it is recommended that the host system clock
+ is synchronized with an accurate external clock, for example, via
+ NTP service.
+
+
+ If you need to isolate the &vmguest; clock from the host one,
+ specify clock=vm instead of the default
+ clock=host.
+
+
+ You can also specify the initial time of the &vmguest;'s clock
+ with the base option:
+
+&prompt.sudo;qemu-system-x86_64 [...] -rtc clock=vm,base=2010-12-03T01:02:00
+
+ Instead of a time stamp, you can specify utc
+ or localtime. The former instructs &vmguest;
+ to start at the current UTC value (Coordinated Universal Time,
+ see ), while
+ the latter applies the local time setting.
+
+
+
+
+
+
+
+ Using devices in &qemu;
+
+
+ &qemu; virtual machines emulate all devices needed to run a &vmguest;.
+ &qemu; supports, for example, several types of network cards, block
+ devices (hard and removable drives), USB devices, character devices
+ (serial and parallel ports), or multimedia devices (graphic and sound
+ cards). This section introduces options to configure multiple types of
+ supported devices.
+
+
+
+
+ If your device, such as -drive, needs a special
+ driver and driver properties to be set, specify them with the
+ -device option, and identify it with the
+ drive= suboption. For example:
+
+&prompt.sudo;qemu-system-x86_64 [...] -drive if=none,id=drive0,format=raw \
+-device virtio-blk-pci,drive=drive0,scsi=off ...
+
+ To get help on available drivers and their properties, use
+ and .
+
+
+
+
+ Block devices
+
+ Block devices are vital for virtual machines. These are fixed or
+ removable storage media called drives. One of the
+ connected hard disks typically holds the guest operating system to be
+ virtualized.
+
+
+ drives are defined with
+ -drive. This option has many sub-options, some of
+ which are described in this section. For the complete list, see the
+ man page (man 1 qemu).
+
+
+ Sub-options for the -drive option
+
+ file=image_fname
+
+
+ Specifies the path to the disk image to be used with this
+ drive. If not specified, an empty (removable) drive is assumed.
+
+
+
+
+ if=drive_interface
+
+
+ Specifies the type of interface to which the drive is connected.
+ Currently only floppy,
+ scsi, ide, or
+ virtio are supported by &suse;.
+ virtio defines a paravirtualized disk driver.
+ Default is ide.
+
+
+
+
+ index=index_of_connector
+
+
+ Specifies the index number of a connector on the disk interface
+ (see the if option) where the drive is
+ connected. If not specified, the index is automatically
+ incremented.
+
+
+
+
+ media=type
+
+
+ Specifies the type of media. Can be disk for
+ hard disks, or cdrom for removable CD-ROM
+ drives.
+
+
+
+
+ format=img_fmt
+
+
+ Specifies the format of the connected disk image. If not
+ specified, the format is autodetected. Currently, &suse; supports
+ raw and qcow2 formats.
+
+
+
+
+ cache=method
+
+
+ Specifies the caching method for the drive. Possible values are
+ unsafe, writethrough,
+ writeback, directsync, or
+ none. To improve performance when using the
+ qcow2 image format, select
+ writeback. none disables
+ the host page cache and, therefore, is the safest option. Default
+ for image files is writeback.
+
+
+
+
+
+
+ To simplify defining block devices, &qemu; understands several
+ shortcuts which you may find handy when entering the
+ qemu-system-ARCH command line.
+
+
+ You can use
+
+&prompt.sudo;qemu-system-x86_64 -cdrom /images/cdrom.iso
+
+ instead of
+
+&prompt.sudo;qemu-system-x86_64 -drive format=raw,file=/images/cdrom.iso,index=2,media=cdrom
+
+ and
+
+&prompt.sudo;qemu-system-x86_64 -hda /images/image1.raw -hdb /images/image2.raw -hdc \
+/images/image3.raw -hdd /images/image4.raw
+
+ instead of
+
+&prompt.sudo;qemu-system-x86_64 -drive format=raw,file=/images/image1.raw,index=0,media=disk \
+-drive format=raw,file=/images/image2.raw,index=1,media=disk \
+-drive format=raw,file=/images/image3.raw,index=2,media=disk \
+-drive format=raw,file=/images/image4.raw,index=3,media=disk
+
+
+ Using host drives instead of images
+
+ As an alternative to using disk images (see
+ ) you can also use
+ existing &vmhost; disks, connect them as drives, and access them from
+ &vmguest;. Use the host disk device directly instead of disk image
+ file names.
+
+
+ To access the host CD-ROM drive, use
+
+&prompt.sudo;qemu-system-x86_64 [...] -drive file=/dev/cdrom,media=cdrom
+
+ To access the host hard disk, use
+
+&prompt.sudo;qemu-system-x86_64 [...] -drive file=/dev/hdb,media=disk
+
+ A host drive used by a &vmguest; must not be accessed concurrently by
+ the &vmhost; or another &vmguest;.
+
+
+
+ Freeing unused guest disk space
+
+ A sparse image file is a type of disk image
+ file that grows in size as the user adds data to it, taking up only
+ as much disk space as is stored in it. For example, if you copy 1 GB
+ of data inside the sparse disk image, its size grows by 1 GB. If you
+ then delete, for example, 500 MB of the data, the image size does not,
+ by default, decrease as expected.
+
+
+ This is why the discard=on option is introduced on
+ the &kvm; command line. It tells the hypervisor to automatically free
+ the holes after deleting data from the sparse guest
+ image. This option is valid only for the
+ if=scsi drive interface:
+
+&prompt.sudo;qemu-system-x86_64 [...] -drive format=img_format,file=/path/to/file.img,if=scsi,discard=on
+
+ Support status
+
+ if=scsi is not supported. This interface does not
+ map to virtio-scsi, but rather to the
+ lsi SCSI adapter.
+
+
+
+
+ IOThreads
+
+ IOThreads are dedicated event loop threads for virtio devices to
+ perform I/O requests to improve scalability, especially on an SMP
+ &vmhost; with SMP &vmguest;s using many disk devices. Instead of
+ using &qemu;'s main event loop for I/O processing, IOThreads allow
+ spreading I/O work across multiple CPUs and can improve latency when
+ properly configured.
+
+
+ IOThreads are enabled by defining IOThread objects. virtio devices
+ can then use the objects for their I/O event loops. Many virtio
+ devices can use a single IOThread object, or virtio devices and
+ IOThread objects can be configured in a 1:1 mapping. The following
+ example creates a single IOThread with ID
+ iothread0 which is then used as the event loop for
+ two virtio-blk devices.
+
+&prompt.sudo;qemu-system-x86_64 [...] -object iothread,id=iothread0\
+-drive if=none,id=drive0,cache=none,aio=native,\
+format=raw,file=filename -device virtio-blk-pci,drive=drive0,scsi=off,\
+iothread=iothread0 -drive if=none,id=drive1,cache=none,aio=native,\
+format=raw,file=filename -device virtio-blk-pci,drive=drive1,scsi=off,\
+iothread=iothread0 [...]
+
+ The following qemu command line example illustrates a 1:1 virtio
+ device to IOThread mapping:
+
+&prompt.sudo;qemu-system-x86_64 [...] -object iothread,id=iothread0\
+-object iothread,id=iothread1 -drive if=none,id=drive0,cache=none,aio=native,\
+format=raw,file=filename -device virtio-blk-pci,drive=drive0,scsi=off,\
+iothread=iothread0 -drive if=none,id=drive1,cache=none,aio=native,\
+format=raw,file=filename -device virtio-blk-pci,drive=drive1,scsi=off,\
+ iothread=iothread1 [...]
+
+
+ Bio-based I/O path for virtio-blk
+
+ For better performance of I/O-intensive applications, a new I/O path
+ was introduced for the virtio-blk interface in kernel version 3.7.
+ This bio-based block device driver skips the I/O scheduler, and thus
+ shortens the I/O path in guest and has lower latency. It is
+ especially useful for high-speed storage devices, such as SSD disks.
+
+
+ The driver is disabled by default. To use it, do the following:
+
+
+
+
+ Append to the kernel
+ command line on the guest. You can do so via
+ &yast;SystemBoot
+ Loader.
+
+
+ You can do it also by editing
+ /etc/default/grub, searching for the line
+ that contains , and
+ adding the kernel parameter at the end. Then run
+ grub2-mkconfig >/boot/grub2/grub.cfg to
+ update the grub2 boot menu.
+
+
+
+
+ Reboot the guest with the new kernel command line active.
+
+
+
+
+ Bio-based driver on slow devices
+
+ The bio-based virtio-blk driver does not help on slow devices such
+ as spin hard disks. The reason is that the benefit of scheduling is
+ larger than what the shortened bio path offers. Do not use the
+ bio-based driver on slow devices.
+
+
+
+
+ Accessing iSCSI resources directly
+
+
+ QEMU now integrates with libiscsi. This allows
+ QEMU to access iSCSI resources directly and use them as virtual
+ machine block devices. This feature does not require any host iSCSI
+ initiator configuration, as is needed for a libvirt iSCSI target
+ based storage pool setup. Instead it directly connects guest storage
+ interfaces to an iSCSI target LUN via the user space library
+ libiscsi. iSCSI-based disk devices can also be specified in the
+ libvirt XML configuration.
+
+
+ RAW image format
+
+ This feature is only available using the RAW image format, as the
+ iSCSI protocol has certain technical limitations.
+
+
+
+
+ The following is the QEMU command line interface for iSCSI
+ connectivity.
+
+
+ virt-manager limitation
+
+ The use of libiscsi based storage provisioning is not yet exposed
+ by the virt-manager interface, but instead is configured
+ by directly editing the guest XML. This new way of accessing
+ iSCSI-based storage is done at the command line.
+
+
+&prompt.sudo;qemu-system-x86_64 -machine accel=kvm \
+ -drive file=iscsi://192.168.100.1:3260/iqn.2016-08.com.example:314605ab-a88e-49af-b4eb-664808a3443b/0,\
+ format=raw,if=none,id=mydrive,cache=none \
+ -device ide-hd,bus=ide.0,unit=0,drive=mydrive ...
+
+ Here is an example snippet of a guest domain XML which uses the
+ protocol-based iSCSI disk:
+
+<devices>
+...
+ <disk type='network' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source protocol='iscsi' name='iqn.2013-07.com.example:iscsi-nopool/2'>
+ <host name='example.com' port='3260'/>
+ </source>
+ <auth username='myuser'>
+ <secret type='iscsi' usage='libvirtiscsi'/>
+ </auth>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+</devices>
+
+ Contrast that with an example which uses the host-based iSCSI
+ initiator that virt-manager sets up:
+
+<devices>
+...
+ <disk type='block' device='disk'>
+ <driver name='qemu' type='raw' cache='none' io='native'/>
+ <source dev='/dev/disk/by-path/scsi-0:0:0:0'/>
+ <target dev='hda' bus='ide'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='0'/>
+ </disk>
+ <controller type='ide' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01'
+ function='0x1'/>
+ </controller>
+</devices>
+
+
+ Using RADOS block devices with &qemu;
+
+ RADOS Block Devices (RBD) store data in a Ceph cluster. They allow
+ snapshotting, replication and data consistency. You can use an RBD
+ from your &kvm;-managed &vmguest;s similarly to how you use other
+ block devices.
+
+
+ For more details, refer to the
+ &ses;
+ &admin;, chapter Ceph as a Back-end
+ for QEMU KVM Instance.
+
+
+
+
+
+ Graphic devices and display options
+
+ This section describes &qemu; options affecting the type of the
+ emulated video card and the way &vmguest; graphical output is
+ displayed.
+
+
+ Defining video cards
+
+ &qemu; uses -vga to define a video card used to
+ display &vmguest; graphical output. The -vga
+ option understands the following values (a short example follows the list):
+
+
+
+ none
+
+
+ Disables video cards on &vmguest; (no video card is emulated).
+ You can still access the running &vmguest; via the serial
+ console.
+
+
+
+
+ std
+
+
+ Emulates a standard VESA 2.0 VBE video card. Use it if you
+ intend to use high display resolution on &vmguest;.
+
+
+
+
+ qxl
+
+
+ QXL is a paravirtual graphic card. It is VGA compatible
+ (including VESA 2.0 VBE support). qxl is
+ recommended when using the spice video
+ protocol.
+
+
+
+
+ virtio
+
+
+ Paravirtual VGA graphic card.
+
+
+
+
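+
+ For example, to start a guest with the paravirtual graphic card:
+
+&prompt.sudo;qemu-system-x86_64 [...] -vga virtio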
+
+
+ Display options
+
+ The following options affect the way &vmguest; graphical output is
+ displayed.
+
+
+
+ -display gtk
+
+
+ Display video output in a GTK window. This interface provides
+ UI elements to configure and control the VM during runtime.
+
+
+
+
+ -display sdl
+
+
+ Display video output via SDL in a separate graphics window. For
+ more information, see the SDL documentation.
+
+
+
+
+ -spice option[,option[,...]]
+
+
+ Enables the spice remote desktop protocol.
+
+
+
+
+ -display vnc
+
+
+ Refer to for more
+ information.
+
+
+
+
+ -nographic
+
+
+ Disables &qemu;'s graphical output. The emulated serial port is
+ redirected to the console.
+
+
+ After starting the virtual machine with
+ -nographic, press
+ A
+ H in the virtual console to view the list of
+ other useful shortcuts, for example, to toggle between the
+ console and the &qemu; monitor.
+
+&prompt.sudo;qemu-system-x86_64 -hda /images/sles_base.raw -nographic
+
+C-a h print this help
+C-a x exit emulator
+C-a s save disk data back to file (if -snapshot)
+C-a t toggle console timestamps
+C-a b send break (magic sysrq)
+C-a c switch between console and monitor
+C-a C-a sends C-a
+(pressed C-a c)
+
+QEMU 2.3.1 monitor - type 'help' for more information
+(qemu)
+
+
+
+ -no-frame
+
+
+ Disables decorations for the &qemu; window. Convenient for
+ dedicated desktop work space.
+
+
+
+
+ -full-screen
+
+
+ Starts &qemu; graphical output in full screen mode.
+
+
+
+
+ -no-quit
+
+
+ Disables the close button of the &qemu; window and prevents it
+ from being closed by force.
+
+
+
+
+ -alt-grab, -ctrl-grab
+
+
+ By default, the &qemu; window releases the
+ captured mouse after pressing
+ .
+ You can change the key combination to either
+
+ (-alt-grab), or the right
+ key
+ (-ctrl-grab).
+
+
+
+
+
+
+
+
+ USB devices
+
+ There are two ways to create USB devices usable by the &vmguest; in
+ &kvm;: you can either emulate new USB devices inside a &vmguest;, or
+ assign an existing host USB device to a &vmguest;. To use USB devices
+ in &qemu; you first need to enable the generic USB driver with the
+ option. Then you can specify individual devices
+ with the option.
+
+
+ Emulating USB devices in &vmguest;
+
+ &suse; currently supports the following types of USB devices:
+ disk, host,
+ serial, braille,
+ net, mouse, and
+ tablet.
+
+
+ Types of USB devices for the -usbdevice option
+
+ disk
+
+
+ Emulates a mass storage device based on a file. The optional
+ format option is used rather than detecting
+ the format.
+
+&prompt.sudo;qemu-system-x86_64 [...] -usbdevice
+ disk:format=raw:/virt/usb_disk.raw
+
+
+
+ host
+
+
+ Pass through the host device (identified by bus.addr).
+
+
+
+
+ serial
+
+
+ Serial converter to a host character device.
+
+
+
+
+ braille
+
+
+ Emulates a braille device using BrlAPI to display the braille
+ output.
+
+
+
+
+ net
+
+
+ Emulates a network adapter that supports CDC Ethernet and RNDIS
+ protocols.
+
+
+
+
+ mouse
+
+
+ Emulates a virtual USB mouse. This option overrides the default
+ PS/2 mouse emulation. The following example shows the hardware
+ status of a mouse on &vmguest; started with
+ qemu-system-ARCH [...] -usbdevice mouse:
+
+&prompt.sudo;hwinfo --mouse
+20: USB 00.0: 10503 USB Mouse
+[Created at usb.122]
+UDI: /org/freedesktop/Hal/devices/usb_device_627_1_1_if0
+[...]
+Hardware Class: mouse
+Model: "Adomax QEMU USB Mouse"
+Hotplug: USB
+Vendor: usb 0x0627 "Adomax Technology Co., Ltd"
+Device: usb 0x0001 "QEMU USB Mouse"
+[...]
+
+
+
+ tablet
+
+
+ Emulates a pointer device that uses absolute coordinates (such
+ as a touchscreen). This option overrides the default PS/2 mouse
+ emulation. The tablet device is useful if you are viewing
+ &vmguest; via the VNC protocol. See
+ for more information.
+
+
+
+
+
+
+
+
+ Character devices
+
+ Use -chardev to create a new character device. The
+ option uses the following general syntax:
+
+qemu-system-x86_64 [...] -chardev BACKEND_TYPE,id=ID_STRING
+
+ where BACKEND_TYPE can be one of
+ null, socket,
+ udp, msmouse,
+ vc, file,
+ pipe, console,
+ serial, pty,
+ stdio, braille,
+ tty, or parport. All character
+ devices must have a unique identification string up to 127 characters
+ long. It is used to identify the device in other related directives.
+ For the complete description of all back-end's sub-options, see the
+ man page (man 1 qemu). A brief description of the
+ available back-ends follows:
+
+
+
+ null
+
+
+ Creates an empty device that outputs no data and drops any data
+ it receives.
+
+
+
+
+ stdio
+
+
+ Connects to &qemu;'s process standard input and standard output.
+
+
+
+
+ socket
+
+
+ Creates a two-way stream socket. If
+ PATH is specified, a Unix socket is
+ created:
+
+&prompt.sudo;qemu-system-x86_64 [...] -chardev \
+socket,id=unix_socket1,path=/tmp/unix_socket1,server
+
+ The SERVER suboption specifies that
+ the socket is a listening socket.
+
+
+ If PORT is specified, a TCP socket is
+ created:
+
+&prompt.sudo;qemu-system-x86_64 [...] -chardev \
+socket,id=tcp_socket1,host=localhost,port=7777,server,nowait
+
+ The command creates a local listening (server)
+ TCP socket on port 7777. &qemu; does not block waiting for a
+ client to connect to the listening port
+ (nowait).
+
+
+
+
+ udp
+
+
+ Sends all network traffic from &vmguest; to a remote host over
+ the UDP protocol.
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-chardev udp,id=udp_fwd,host=&wsIVname;,port=7777
+
+ The command binds port 7777 on the remote host &wsIVname; and
+ sends &vmguest; network traffic there.
+
+
+
+
+ vc
+
+
+ Creates a new &qemu; text console. You can optionally specify the
+ dimensions of the virtual console:
+
+&prompt.sudo;qemu-system-x86_64 [...] -chardev vc,id=vc1,width=640,height=480 \
+-mon chardev=vc1
+
+ The command creates a new virtual console called
+ vc1 of the specified size, and connects the
+ &qemu; monitor to it.
+
+
+
+
+ file
+
+
+ Logs all traffic from &vmguest; to a file on &vmhost;. The
+ path is required; the file is created automatically
+ if it does not exist.
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-chardev file,id=qemu_log1,path=/var/log/qemu/guest1.log
+
+
+
+
+ By default &qemu; creates a set of character devices for serial and
+ parallel ports, and a special console for &qemu; monitor. However, you
+ can create your own character devices and use them for the mentioned
+ purposes. The following options may help you (a short example follows the list):
+
+
+
+ -serial CHAR_DEV
+
+
+ Redirects the &vmguest;'s virtual serial port to a character
+ device CHAR_DEV on &vmhost;. By
+ default, it is a virtual console (vc) in
+ graphical mode, and stdio in non-graphical
+ mode. The -serial understands many
+ sub-options. See the man page man 1 qemu
+ for a complete list of them.
+
+
+ You can emulate up to four serial ports. Use -serial
+ none to disable all serial ports.
+
+
+
+
+ -parallel DEVICE
+
+
+ Redirects the &vmguest;'s parallel port to a
+ DEVICE. This option supports the same
+ devices as -serial.
+
+
+
+ With
+ &sls; or &opensuse;
+ Leap as a &vmhost;, you can directly use the hardware
+ parallel port devices /dev/parportN where
+ N is the number of the port.
+
+
+
+ You can emulate up to three parallel ports. Use
+ -parallel none to disable all parallel ports.
+
+
+
+
+ -monitor CHAR_DEV
+
+
+ Redirects the &qemu; monitor to a character device
+ CHAR_DEV on &vmhost;. This option
+ supports the same devices as -serial. By
+ default, it is a virtual console (vc) in a
+ graphical mode, and stdio in non-graphical
+ mode.
+
+
+
+
+
+ For a complete list of available character devices back-ends, see the
+ man page (man 1 qemu).
+
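+
+ For example, to log the guest's serial output to a file on the &vmhost;
+ (the path is illustrative):
+
+&prompt.sudo;qemu-system-x86_64 [...] -serial file:/var/log/qemu/guest1-serial.log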
+
+
+
+ Networking in &qemu;
+
+
+ Use the -netdev option in combination with
+ to define a specific type of networking and a
+ network interface card for your &vmguest;. The syntax for the
+ option is
+
+
+-netdev type[,prop[=value][,...]]
+
+
+ Currently, &suse; supports the following network types:
+ user, bridge, and
+ tap. For a complete list of -netdev
+ sub-options, see the man page (man 1 qemu).
+
+
+
+ Supported -netdev sub-options
+
+ bridge
+
+
+ Uses a specified network helper to configure the TAP interface and
+ attach it to a specified bridge. For more information, see
+ .
+
+
+
+
+ user
+
+
+ Specifies user-mode networking. For more information, see
+ .
+
+
+
+
+ tap
+
+
+ Specifies bridged or routed networking. For more information, see
+ .
+
+
+
+
+
+
+ Defining a network interface card
+
+ Use -netdev together with the related
+ option to add a new emulated network card:
+
+&prompt.sudo;qemu-system-x86_64 [...] \
+-netdev tap,id=hostnet0 \
+-device virtio-net-pci,netdev=hostnet0,vlan=1,\
+macaddr=&wsIVmac;,name=ncard1
+
+
+
+ Specifies the network device type.
+
+
+
+
+ Specifies the model of the network card. Use
+ qemu-system-ARCH -device help and search for the
+ Network devices: section to get the list of all
+ network card models supported by &qemu; on your platform.
+
+
+ Currently, &suse; supports the models rtl8139,
+ e1000 and its variants
+ e1000-82540em, e1000-82544gc
+ and e1000-82545em, and
+ virtio-net-pci. To view a list of options for a
+ specific driver, add as a driver option:
+
+&prompt.sudo;qemu-system-x86_64 -device e1000,help
+e1000.mac=macaddr
+e1000.vlan=vlan
+e1000.netdev=netdev
+e1000.bootindex=int32
+e1000.autonegotiation=on/off
+e1000.mitigation=on/off
+e1000.addr=pci-devfn
+e1000.romfile=str
+e1000.rombar=uint32
+e1000.multifunction=on/off
+e1000.command_serr_enable=on/off
+
+
+
+ Connects the network interface to VLAN number 1. You can specify
+ your own number—it is mainly useful for identification
+ purposes. If you omit this suboption, &qemu; uses the default 0.
+
+
+
+
+ Specifies the Media Access Control (MAC) address for the network
+ card. It is a unique identifier and you are advised to always
+ specify it. Otherwise, &qemu; supplies its own default MAC address,
+ creating a possible MAC address conflict within the related VLAN.
+
+
+
+
+
+
+ User-mode networking
+
+ The -netdev user option instructs &qemu; to use
+ user-mode networking. This is the default if no networking mode is
+ selected. Therefore, these command lines are equivalent:
+
+&prompt.sudo;qemu-system-x86_64 -hda /images/sles_base.raw
+&prompt.sudo;qemu-system-x86_64 -hda /images/sles_base.raw -netdev user,id=hostnet0
+
+ This mode is useful to allow the &vmguest; to access the external
+ network resources, such as the Internet. By default, no incoming
+ traffic is permitted and therefore, the &vmguest; is not visible to
+ other machines on the network. No administrator privileges are required
+ in this networking mode. The user-mode is also useful for doing a
+ network boot on your &vmguest; from a local directory on &vmhost;.
+
+
+ The &vmguest; is assigned an IP address from a virtual DHCP server.
+ &vmhost; (the DHCP server) is reachable at 10.0.2.2, while the IP
+ address range for allocation starts from 10.0.2.15. You can use
+ ssh to connect to &vmhost; at 10.0.2.2, and
+ scp to copy files back and forth.
+
+
+ Command line examples
+
+ This section shows several examples on how to set up user-mode
+ networking with &qemu;.
+
+
+ Restricted user-mode networking
+&prompt.sudo;qemu-system-x86_64 [...] \
+-netdev user,id=hostnet0 \
+-device virtio-net-pci,netdev=hostnet0,vlan=1,name=user_net1,restrict=yes
+
+
+
+ Specifies user-mode networking.
+
+
+
+
+ Connects to VLAN number 1. If omitted, defaults to 0.
+
+
+
+
+ Specifies a human-readable name of the network stack. Useful
+ when identifying it in the &qemu; monitor.
+
+
+
+
+ Isolates &vmguest;. It then cannot communicate with &vmhost;
+ and no network packets are routed to the external network.
+
+
+
+
+
+ User-mode networking with custom IP range
+&prompt.sudo;qemu-system-x86_64 [...] \
+-netdev user,id=hostnet0 \
+-device virtio-net-pci,netdev=hostnet0,net=10.2.0.0/8,host=10.2.0.6,\
+dhcpstart=10.2.0.20,hostname=tux_kvm_guest
+
+
+
+ Specifies the IP address of the network that &vmguest; sees and
+ optionally the netmask. Default is 10.0.2.0/8.
+
+
+
+
+ Specifies the &vmhost; IP address that &vmguest; sees. Default
+ is 10.0.2.2.
+
+
+
+
+ Specifies the first of the 16 IP addresses that the built-in
+ DHCP server can assign to &vmguest;. Default is 10.0.2.15.
+
+
+
+
+ Specifies the host name that the built-in DHCP server assigns
+ to &vmguest;.
+
+
+
+
+
+ User-mode networking with network-boot and TFTP
+&prompt.sudo;qemu-system-x86_64 [...] \
+-netdev user,id=hostnet0 \
+-device virtio-net-pci,netdev=hostnet0,tftp=/images/tftp_dir,\
+bootfile=/images/boot/pxelinux.0
+
+
+
+ Activates a built-in TFTP (a file transfer protocol with the
+ functionality of a basic FTP) server. The files in the
+ specified directory are visible to a &vmguest; as the root of a
+ TFTP server.
+
+
+
+
+ Broadcasts the specified file as a BOOTP (a network protocol
+ that offers an IP address and a network location of a boot
+ image, often used in diskless workstations) file. When used
+ together with tftp, the &vmguest; can boot
+ via the network from the local directory on the host.
+
+
+
+
+
+ User-mode networking with host port forwarding
+&prompt.sudo;qemu-system-x86_64 [...] \
+-netdev user,id=hostnet0 \
+-device virtio-net-pci,netdev=hostnet0,hostfwd=tcp::2222-:22
+
+ Forwards incoming TCP connections on port 2222 on the host to
+ port 22 (SSH) on
+ &vmguest;. If sshd is
+ running on &vmguest;, enter
+
+&prompt.user;ssh qemu_host -p 2222
+
+ where qemu_host is the host name or IP address
+ of the host system, to get an
+ SSH prompt from &vmguest;.
+
+
+
+
+
+
+ Bridged networking
+
+ With the -netdev tap option, &qemu; creates a
+ network bridge by connecting the host TAP network device to a specified
+ VLAN of &vmguest;. Its network interface is then visible to the rest of
+ the network. This method does not work by default and needs to be
+ explicitly specified.
+
+
+ First, create a network bridge and add a &vmhost; physical network
+ interface to it, such as eth0:
+
+
+
+
+ Start &yastcc; and select
+ SystemNetwork
+ Settings.
+
+
+
+
+ Click Add and select Bridge
+ from the Device Type drop-down box in the
+ Hardware Dialog window. Click
+ Next.
+
+
+
+
+ Choose whether you need a dynamically or statically assigned IP
+ address, and fill the related network settings if applicable.
+
+
+
+
+ In the Bridged Devices pane, select the Ethernet
+ device to add to the bridge.
+
+
+ Click Next. When asked about adapting an already
+ configured device, click Continue.
+
+
+
+
+ Click OK to apply the changes. Check if the
+ bridge is created:
+
+&prompt.user;bridge link
+2: eth0 state UP : <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 master br0 \
+ state forwarding priority 32 cost 100
+
+
+
+
+ Connecting to a bridge manually
+
+ Use the following example script to connect &vmguest; to the newly
+ created bridge interface br0. Several commands in
+ the script are run via the sudo mechanism because
+ they require &rootuser; privileges.
+
+
+ Required software
+
+ To manage a network bridge, you need to have the
+ tunctl package installed.
+
+
+
+#!/bin/bash
+bridge=br0
+tap=$(sudo tunctl -u $(whoami) -b)
+sudo ip link set $tap up
+sleep 1s
+sudo ip link add name $bridge type bridge
+sudo ip link set $bridge up
+sudo ip link set $tap master $bridge
+qemu-system-x86_64 -machine accel=kvm -m 512 -hda /images/sles_base.raw \
+ -netdev tap,id=hostnet0 \
+ -device virtio-net-pci,netdev=hostnet0,vlan=0,macaddr=&wsIVmac;,\
+ ifname=$tap,script=no,downscript=no
+sudo ip link set $tap nomaster
+sudo ip link set $tap down
+sudo tunctl -d $tap
+
+
+
+ Name of the bridge device.
+
+
+
+
+ Prepare a new TAP device and assign it to the user who runs the
+ script. TAP devices are virtual network devices often used for
+ virtualization and emulation setups.
+
+
+
+
+ Bring up the newly created TAP network interface.
+
+
+
+
+ Make a 1-second pause to make sure the new TAP network interface
+ is really up.
+
+
+
+
+ Add the new TAP device to the network bridge
+ br0.
+
+
+
+
+ The ifname= suboption specifies the name of
+ the TAP network interface used for bridging.
+
+
+
+
+ Before qemu-system-ARCH connects to a network
+ bridge, it checks the script and
+ downscript values. If it finds the specified
+ scripts on the &vmhost; file system, it runs the
+ script before it connects to the network
+ bridge and downscript after it exits the
+ network environment. You can use these scripts to set up and tear
+ down the bridged interfaces. By default,
+ /etc/qemu-ifup and
+ /etc/qemu-ifdown are examined. If
+ script=no and downscript=no
+ are specified, the script execution is disabled and you need to
+ take care of it manually.
+
+
+
+
+ Deletes the TAP interface from a network bridge
+ br0.
+
+
+
+
+ Sets the state of the TAP device to down.
+
+
+
+
+ Tear down the TAP device.
+
+
+
+
+
+ Connecting to a bridge with qemu-bridge-helper
+
+ Another way to connect &vmguest; to a network through a network
+ bridge is via the qemu-bridge-helper helper
+ program. It configures the TAP interface for you, and attaches it to
+ the specified bridge. The default helper executable is
+ /usr/lib/qemu-bridge-helper. The helper
+ executable is setuid root and is only executable by the members of
+ the virtualization group (kvm). Therefore, the
+ qemu-system-ARCH command itself does not need to
+ be run under &rootuser; privileges.
+
+
+ The helper is automatically called when you specify a network bridge:
+
+qemu-system-x86_64 [...] \
+ -netdev bridge,id=hostnet0,vlan=0,br=br0 \
+ -device virtio-net-pci,netdev=hostnet0
+
+ You can specify your own custom helper script that takes care of the
+ TAP device (de)configuration, with the
+ option:
+
+qemu-system-x86_64 [...] \
+ -netdev bridge,id=hostnet0,vlan=0,br=br0,helper=/path/to/bridge-helper \
+ -device virtio-net-pci,netdev=hostnet0
+
+
+ To define access privileges to
+ qemu-bridge-helper, inspect the
+ /etc/qemu/bridge.conf file. For example, the
+ following directive
+
+allow br0
+
+ allows the qemu-system-ARCH command to connect
+ its &vmguest; to the network bridge br0.
+
+
+
+
+
+
+ Viewing a &vmguest; with VNC
+
+
+ By default &qemu; uses a GTK (a cross-platform toolkit library) window to
+ display the graphical output of a &vmguest;.
+
+
+ With the -vnc option specified, you can make &qemu;
+ listen on a specified VNC display and redirect its graphical output to
+ the VNC session.
+
+
+
+
+ When working with &qemu;'s virtual machine via VNC session, it is
+ useful to work with the -usbdevice tablet option.
+
+
+ Moreover, if you need to use another keyboard layout than the default
+ en-us, specify it with the -k
+ option.
+
+
+
+
+ The first suboption of -vnc must be a
+ display value. The -vnc option
+ understands the following display specifications:
+
+
+
+
+ host:display
+
+
+ Only connections from host on the display number
+ display are accepted. The TCP port on which the
+            VNC session is then running is normally 5900 +
+            display. If you do not specify
+ host, connections are accepted from any host.
+
+
+
+
+ unix:path
+
+
+ The VNC server listens for connections on Unix domain sockets. The
+ path option specifies the location of the
+ related Unix socket.
+
+
+
+
+ none
+
+
+ The VNC server functionality is initialized, but the server itself
+ is not started. You can start the VNC server later with the &qemu;
+ monitor. For more information, see
+ .
+
+
+
+
+
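+
+      To illustrate the display specifications, the following sketches
+      export the guest display on TCP port 5901 for local connections
+      only, and on a Unix domain socket (the socket path is a
+      placeholder):
+
+qemu-system-x86_64 [...] -vnc localhost:1
+qemu-system-x86_64 [...] -vnc unix:/run/my-vm-vnc.sock
+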
+
+ Following the display value there may be one or more option flags
+ separated by commas. Valid options are:
+
+
+
+
+ reverse
+
+
+ Connect to a listening VNC client via a
+ reverse connection.
+
+
+
+
+ websocket
+
+
+          Open an additional TCP listening port dedicated to VNC
+          WebSocket connections. By default, the WebSocket port is
+          5700 + display.
+
+
+
+
+ password
+
+
+ Require that password-based authentication is used for client
+ connections.
+
+
+
+
+ tls
+
+
+ Require that clients use TLS when communicating with the VNC
+ server.
+
+
+
+
+ x509=/path/to/certificate/dir
+
+
+          Valid if TLS is specified. Require that x509 credentials are
+          used for negotiating the TLS session. The server sends its
+          certificate to the client but does not request a certificate
+          from the client.
+
+
+
+
+ x509verify=/path/to/certificate/dir
+
+
+          Valid if TLS is specified. Same as x509,
+          but additionally require that the client presents its own x509
+          certificate, which the server validates against the CA
+          certificate.
+
+
+
+
+ sasl
+
+
+ Require that the client uses SASL to authenticate with the VNC
+ server.
+
+
+
+
+ acl
+
+
+          Turn on access control lists for checking the x509 client
+          certificate and the SASL party.
+
+
+
+
+ lossy
+
+
+ Enable lossy compression methods (gradient, JPEG, ...).
+
+
+
+
+ non-adaptive
+
+
+ Disable adaptive encodings. Adaptive encodings are enabled by
+ default.
+
+
+
+
+ share=[allow-exclusive|force-shared|ignore]
+
+
+ Set display sharing policy.
+
+
+
+
+
+
+
+ For more details about the display options, see the
+ qemu-doc man page.
+
+
+
+
+      An example of VNC usage:
+
+
+&prompt.sudo;qemu-system-x86_64 [...] -vnc :5
+# (on the client:)
+&prompt.user;vncviewer &wsII;:5 &
+
+
+ &qemu; VNC session
+
+
+
+
+
+
+
+
+
+
+
+ Secure VNC connections
+
+ The default VNC server setup does not use any form of authentication.
+ In the previous example, any user can connect and view the &qemu; VNC
+ session from any host on the network.
+
+
+      There are several levels of security that you can apply to your
+      VNC client/server connection. You can protect your connection with
+      a password, use x509 certificates, use SASL authentication, or
+      combine several authentication methods in one &qemu; command.
+
+
+
+      For more information, see the documentation on configuring x509
+      certificates on a &vmhost; and the client.
+
+
+ The Remmina VNC viewer supports advanced authentication mechanisms. For
+ this example, let us assume that the server x509 certificates
+ ca-cert.pem, server-cert.pem,
+ and server-key.pem are located in the
+ /etc/pki/qemu directory on the host. The client
+      certificates can be placed in any custom directory, as Remmina asks
+      for their path at connection start-up.
+
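+
+      If you do not have an internal certificate authority, you can
+      create self-signed test credentials, for example, with GnuTLS'
+      certtool. The following is only a rough sketch:
+      the template files ca.info and
+      server.info are assumptions that you need to
+      prepare first, and the resulting certificates must be copied to
+      /etc/pki/qemu.
+
+&prompt.user;certtool --generate-privkey > ca-key.pem
+&prompt.user;certtool --generate-self-signed --load-privkey ca-key.pem \
+ --template ca.info --outfile ca-cert.pem
+&prompt.user;certtool --generate-privkey > server-key.pem
+&prompt.user;certtool --generate-certificate --load-privkey server-key.pem \
+ --load-ca-certificate ca-cert.pem --load-ca-privkey ca-key.pem \
+ --template server.info --outfile server-cert.pem
+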
+
+ Password authentication
+qemu-system-x86_64 [...] -vnc :5,password -monitor stdio
+
+        Starts the &vmguest; graphical output on VNC display number 5,
+        which corresponds to port 5905. The password
+        suboption initializes a simple password-based authentication
+        method. No password is set by default, and you need to set one
+        with the change vnc password command in the
+        &qemu; monitor:
+
+QEMU 2.3.1 monitor - type 'help' for more information
+(qemu) change vnc password
+Password: ****
+
+
+ You need the -monitor stdio option here, because
+ you would not be able to manage the &qemu; monitor without
+ redirecting its input/output.
+
+
+
+ Authentication dialog in Remmina
+
+
+
+
+
+
+
+
+
+
+ x509 certificate authentication
+
+ The &qemu; VNC server can use TLS encryption for the session and x509
+ certificates for authentication. The server asks the client for a
+ certificate and validates it against the CA certificate. Use this
+ authentication type if your company provides an internal certificate
+ authority.
+
+qemu-system-x86_64 [...] -vnc :5,tls,x509verify=/etc/pki/qemu
+
+
+ x509 certificate and password authentication
+
+ You can combine the password authentication with TLS encryption and
+ x509 certificate authentication to create a two-layer authentication
+ model for clients. Remember to set the password in the &qemu; monitor
+ after you run the following command:
+
+qemu-system-x86_64 [...] -vnc :5,password,tls,x509verify=/etc/pki/qemu \
+-monitor stdio
+
+
+ SASL authentication
+
+ Simple Authentication and Security Layer (SASL) is a framework for
+ authentication and data security in Internet protocols. It integrates
+        several authentication mechanisms, such as PAM, Kerberos and
+        LDAP. SASL keeps its own user database, so the connecting user
+        accounts do not need to exist on the &vmhost;.
+
+
+ For security reasons, you are advised to combine SASL authentication
+ with TLS encryption and x509 certificates:
+
+qemu-system-x86_64 [...] -vnc :5,tls,x509,sasl -monitor stdio
+
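+
+      As a rough sketch of the server-side SASL setup, assuming that the
+      Cyrus SASL tools are installed and that the &qemu; SASL
+      configuration in /etc/sasl2/qemu.conf points
+      its sasldb_path to
+      /etc/qemu/passwd.db, you could create a VNC
+      user named wilber (a placeholder) as follows:
+
+&prompt.sudo;saslpasswd2 -f /etc/qemu/passwd.db -c wilber
+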
+
+
+
diff --git a/references/vt_io.xml b/references/virtualization-io.xml
similarity index 93%
rename from references/vt_io.xml
rename to references/virtualization-io.xml
index 81a0680af..d744a12cc 100644
--- a/references/vt_io.xml
+++ b/references/virtualization-io.xml
@@ -4,7 +4,7 @@
%entities;
]>
-See
- for a list of guest operating systems supporting
- paravirtualization.
+ paravirtualization API, or availability of paravirtualized drivers. For a list of guest operating systems supporting paravirtualization, refer to
+ the section Availability of paravirtualized drivers in the article Virtualization Limits and
+ Support.
@@ -77,9 +77,8 @@
VFIO stands for Virtual Function I/O and is a new
user-level driver framework for Linux. It replaces the traditional &kvm;
&pciback; device assignment. The VFIO driver exposes direct device access
- to user space in a secure memory
- () protected environment.
+ to user space in a secure environment protected by an Input/Output
+ Memory Management Unit (IOMMU).
With VFIO, a &vmguest; can directly access hardware devices on the
&vmhost; (pass-through), avoiding performance issues caused by emulation
in performance critical paths. This method does not allow to share
diff --git a/references/virtualization-spice-removal.xml b/references/virtualization-spice-removal.xml
new file mode 100644
index 000000000..98191cd5a
--- /dev/null
+++ b/references/virtualization-spice-removal.xml
@@ -0,0 +1,156 @@
+
+
+ %entities;
+]>
+
+
+
+ Deprecation of Spice and Migration to VNC in SUSE Linux Enterprise 16
+
+
+
+ yes
+
+
+
+ Overview of Spice Deprecation
+
+ Removal of Spice in &suselinux; 16
+
+ Starting with &suselinux; 16, support for the Spice remote computing protocol for virtual machines (VMs)
+ has been completely removed in favor of VNC, which is a more universally adopted standard.
+ Consequently, any attempt to start a Spice-based VM on an &slea; 16 host will fail with an error.
+ All existing VMs that were created on &slea; 15 SP7 or earlier and are configured to use Spice for the
+ graphical console must be manually converted to use VNC. Currently, no automated conversion tool
+ is provided, so this process requires manually editing the VM's XML configuration file.
+
+
+
+ The following procedure outlines the steps to convert a VM from Spice to VNC.
+
+
+
+ Ensure the virtual machine is completely shut down before making any changes. This prevents any potential data corruption or configuration mismatches.
+ &prompt.sudo;virsh shutdown VM-NAME
+
+
+ Export the current XML configuration of the VM to a file. This file will be edited in the next steps.
+ &prompt.sudo;virsh dumpxml VM-NAME > VM-NAME.xml
+
+
+ Create a backup of the original XML definition. This allows you to restore the original configuration if something goes wrong during the editing process.
+ &prompt.user;cp VM-NAME.xml VM-NAME-SPICE.xml
+
+
+ Manually edit the VM's XML configuration file to remove all Spice-related elements and
+ modify others to use VNC. For detailed information, see the subsequent sections; a quick
+ way to locate the relevant elements is shown after this procedure.
+
+
+
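+
+    To quickly locate the elements that need attention, you can, for example, search the
+    dumped configuration for Spice and QXL references. This is a simple illustration, not
+    an exhaustive check:
+
+&prompt.user;grep -n -i -e spice -e qxl VM-NAME.xml
+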
+
+
+ Removing Spice-Specific XML Elements
+ The following XML snippets are specific to the Spice protocol and must be removed from the VM's configuration file. These elements enable features like the Spice agent channel and USB redirection over Spice, which are no longer supported.
+
+ <!-- This channel is for the Spice agent and is no longer needed -->
+ <channel type='spicevmc'>
+ <target type='virtio' name='com.redhat.spice.0'/>
+ <address type='virtio-serial' controller='0' bus='0' port='2'/>
+ </channel>
+
+ <!-- These elements enable USB redirection over Spice and must be removed -->
+ <redirdev bus='usb' type='spicevmc'>
+ <address type='usb' bus='0' port='2'/>
+ </redirdev>
+
+ <redirdev bus='usb' type='spicevmc'>
+ <address type='usb' bus='0' port='3'/>
+ </redirdev>
+
+
+
+ Modifying XML Elements for VNC Compatibility
+ After removing the Spice-specific elements, modify several other XML elements to switch the VM from using Spice to VNC for its graphical console and other devices.
+
+ Graphics and Audio
+ The primary change is to switch the graphics display protocol from Spice to VNC. You should also disable the audio device, as Spice-based audio is no longer supported.
+ Replace the Spice graphics configuration:
+ <graphics type='spice' autoport='yes' listen='127.0.0.1'>
+ with the VNC equivalent:
+ <graphics type='vnc' autoport='yes' listen='127.0.0.1'>
+ Then, replace the Spice audio configuration:
+ <audio id='1' type='spice'/>
+ with:
+ <audio id='1' type='none'/>
+
+
+ QXL Video Device Conversion for Windows VMs
+ The QXL video driver is optimized for the Spice protocol and is commonly used in Windows VMs. For better compatibility with VNC, it is recommended to replace it with the more generic virtio video driver.
+ If your VM, especially a Windows guest, uses a QXL video device like this:
+
+ <video>
+ <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1' primary='yes'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x0'/>
+ </video>
+
+ Replace the entire <video> block with a simpler virtio model definition:
+
+ <video>
+ <model type="virtio"/>
+ </video>
+
+
+ When you define the VM, &libvirt; automatically fills in the necessary default values for the virtio video device, so you do not need to specify details such as RAM size or bus address.
+
+
+
+
+ Applying the new configuration and starting the VM
+ After you have finished editing the XML file and saved your changes, you need to make &libvirt; aware of the new configuration. This is done by first undefining the VM, which removes its old configuration from &libvirt;, and then defining it again using the modified XML file.
+
+
+ Undefine the existing VM configuration and then define it again with the modified XML file:
+ &prompt.sudo;virsh undefine VM-NAME
+ &prompt.sudo;virsh define VM-NAME.xml
+
+
+ Start the VM, which now uses VNC for its graphical console.
+ &prompt.sudo;virsh start VM-NAME
+
+
+
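+
+    To confirm that the running guest now uses VNC, you can query its graphical display
+    address with virsh:
+
+&prompt.sudo;virsh vncdisplay VM-NAME
+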
+
+
+ Restoring copy-paste functionality
+ A common feature available with Spice is copy-paste functionality between the host and the guest VM, which is handled by the Spice agent. After migrating to VNC, this functionality will be lost. However, you can restore it by using the QEMU guest agent.
+
+
+
+ Ensure that the gtk-vnc packages on your &slea; 16 host are up to date.
+ The versions shipped with the initial release of &slea; 16 may lack support for copy-paste over
+ VNC.
+
+
+
+ Inside the guest VM, ensure the QEMU guest agent is installed and the corresponding service is enabled to start on boot.
+ &prompt.sudo;systemctl enable qemu-guest-agent
+
+
+ Add a new channel definition to the VM's XML configuration. This channel enables clipboard sharing (copy-paste) between the host and the guest.
+
+ <channel type="qemu-vdagent">
+ <source>
+ <clipboard copypaste="yes"/>
+ </source>
+ <target type="virtio" name="com.redhat.spice.0"/>
+ <address type="virtio-serial" controller="0" bus="0" port="1"/>
+ </channel>
+
+
+ Shut down and restart the VM for the changes to take effect.
+
+
+
+
diff --git a/references/virtualization-support.xml b/references/virtualization-support.xml
index feaebbfd3..71f95e082 100644
--- a/references/virtualization-support.xml
+++ b/references/virtualization-support.xml
@@ -393,7 +393,7 @@
- &sls; 15 SP3, 15 SP4, 15 SP5, 15 SP6, 15 SP6
+ &sls; 15 SP3, 15 SP4, 15 SP5, 15 SP6, 15 SP7
@@ -875,11 +875,9 @@
Confidential Computing
-
- &slsa; 15 SP6 includes kernel patches and tooling to
- enable Intel TDX Confidential Computing technology in the
- product. As this technology is not yet fully ready for a production
- environment, it is provided as a technology preview.
+
+ For information about Confidential Computing, refer to the release notes:
+