author     Sandeep Gundlupet Raju <sandeep.gundlupet-raju@amd.com>  2024-01-03 19:51:30 -0700
committer  Mark Hatle <mark.hatle@amd.com>  2024-01-06 11:36:39 -0600
commit     e77c82c25590cbb93e6c5d66618bf0f1c79220dd (patch)
tree       c1d5ea2878a6b85cbab2cd5fdf888fc225a9635f /meta-xilinx-virtualization
parent     14f302930c32c2589268bd26a51a209e0e3a4d41 (diff)
download   meta-xilinx-e77c82c25590cbb93e6c5d66618bf0f1c79220dd.tar.gz
qemu: Move xen recipes and files from petalinux layer

Move the qemu xen recipes and files from meta-petalinux to the
meta-xilinx-virtualization layer.

Signed-off-by: Sandeep Gundlupet Raju <sandeep.gundlupet-raju@amd.com>
Signed-off-by: Mark Hatle <mark.hatle@amd.com>
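
For a build that wants to consume the relocated recipes, a minimal, illustrative sketch of the configuration involved (the layer path and the choice of features below are assumptions for illustration, not part of this commit):

    # conf/bblayers.conf -- make the layer visible to BitBake
    BBLAYERS += "/path/to/meta-xilinx/meta-xilinx-virtualization"

    # conf/local.conf -- the bbappends added by this commit only take effect
    # when the matching DISTRO_FEATURES are enabled
    DISTRO_FEATURES:append = " xen tpm"
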
Diffstat (limited to 'meta-xilinx-virtualization')
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu-native_%.bbappend | 1
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu-system-native_%.bbappend | 1
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu-tpm.inc | 4
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xen.inc | 33
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-native_%.bbappend | 1
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-system-native_%.bbappend | 1
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx_%.bbappend | 5
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen-pt-fix-syntax-error-that-causes-FTBFS-in-some-co.patch | 40
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen-when-unplugging-emulated-devices-skip-virtio-dev.patch | 51
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen_common-return-error-from-xen_create_ioreq_server.patch | 55
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0002-xen-add-pseudo-RAM-region-for-grant-mappings.patch | 252
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0002-xen-mapcache-move-xen-mapcache.c-to-hw-xen.patch | 88
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0003-hw-i386-xen-rearrange-xen_hvm_init_pc.patch | 106
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0003-softmmu-let-qemu_map_ram_ptr-use-qemu_ram_ptr_length.patch | 113
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0004-xen-hvm-move-x86-specific-fields-out-of-XenIOState.patch | 180
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0004-xen-let-xen_ram_addr_from_mapcache-return-1-in-case-.patch | 49
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch | 150
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0005-xen-hvm-create-arch_handle_ioreq-and-arch_xen_set_me.patch | 192
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0006-xen-add-map-and-unmap-callbacks-for-grant-region.patch | 255
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch | 2094
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0007-xen-mapcache-Fix-build-on-Arm.patch | 37
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0007-xen-skip-ioreq-creation-on-ioreq-registration-failur.patch | 42
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0008-accel-xen-xen-all-export-xenstore_record_dm_state.patch | 48
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0008-hw-arm-Add-grant-mapping.patch | 39
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0009-xen-hvm-enable-xen-hvm-common-build-for-ARM.patch | 43
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0010-hw-arm-introduce-xenpv-machine.patch | 230
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0011-meson.build-do-not-set-have_xen_pci_passthrough-for-.patch | 33
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0012-xen-arm-call-qemu_find_tpm_be-if-CONFIG_TPM.patch | 72
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0013-arm-xenpv-fix-TPM-address-print-warning.patch | 27
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0014-xen_arm-Create-virtio-mmio-devices-during-initializa.patch | 83
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0015-xen_arm-Initialize-RAM-and-add-hi-low-memory-regions.patch | 105
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0016-xen_arm-Add-accel-xen-and-drop-extra-interface-openi.patch | 79
-rw-r--r--  meta-xilinx-virtualization/recipes-devtools/qemu/qemu_%.bbappend | 5
33 files changed, 4514 insertions(+), 0 deletions(-)
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-native_%.bbappend b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-native_%.bbappend
new file mode 100644
index 00000000..e84844cf
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-native_%.bbappend
@@ -0,0 +1 @@
require qemu-tpm.inc
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-system-native_%.bbappend b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-system-native_%.bbappend
new file mode 100644
index 00000000..e84844cf
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-system-native_%.bbappend
@@ -0,0 +1 @@
require qemu-tpm.inc
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-tpm.inc b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-tpm.inc
new file mode 100644
index 00000000..a582b035
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-tpm.inc
@@ -0,0 +1,4 @@
PACKAGECONFIG:append = "${@bb.utils.contains('DISTRO_FEATURES', 'tpm', ' tpm', '', d)}"

PACKAGECONFIG[tpm] = "--enable-tpm,--disable-tpm,,swtpm libtpm"

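As a reading aid (not part of the file above), the PACKAGECONFIG[tpm] flag follows the standard four-field layout; a commented sketch:

    # PACKAGECONFIG[tpm] = "--enable-tpm,--disable-tpm,,swtpm libtpm"
    #   field 1: --enable-tpm   -> configure option added when 'tpm' is enabled
    #   field 2: --disable-tpm  -> configure option added when 'tpm' is disabled
    #   field 3: (empty)        -> extra build-time dependencies (DEPENDS)
    #   field 4: swtpm libtpm   -> extra runtime dependencies (RDEPENDS)
    # The :append above only turns 'tpm' on when DISTRO_FEATURES contains 'tpm'.
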
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xen.inc b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xen.inc
new file mode 100644
index 00000000..6d1cd1eb
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xen.inc
@@ -0,0 +1,33 @@
# Xen specific changes are only applicable on the target
SRC_URI_XEN = ""
SRC_URI_XEN:class-target = " \
    file://0001-xen-pt-fix-syntax-error-that-causes-FTBFS-in-some-co.patch \
    file://0001-xen_common-return-error-from-xen_create_ioreq_server.patch \
    file://0002-xen-mapcache-move-xen-mapcache.c-to-hw-xen.patch \
    file://0003-hw-i386-xen-rearrange-xen_hvm_init_pc.patch \
    file://0004-xen-hvm-move-x86-specific-fields-out-of-XenIOState.patch \
    file://0005-xen-hvm-create-arch_handle_ioreq-and-arch_xen_set_me.patch \
    file://0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch \
    file://0007-xen-skip-ioreq-creation-on-ioreq-registration-failur.patch \
    file://0008-accel-xen-xen-all-export-xenstore_record_dm_state.patch \
    file://0009-xen-hvm-enable-xen-hvm-common-build-for-ARM.patch \
    file://0010-hw-arm-introduce-xenpv-machine.patch \
    file://0011-meson.build-do-not-set-have_xen_pci_passthrough-for-.patch \
    file://0012-xen-arm-call-qemu_find_tpm_be-if-CONFIG_TPM.patch \
    file://0013-arm-xenpv-fix-TPM-address-print-warning.patch \
    file://0014-xen_arm-Create-virtio-mmio-devices-during-initializa.patch \
    file://0015-xen_arm-Initialize-RAM-and-add-hi-low-memory-regions.patch \
    file://0016-xen_arm-Add-accel-xen-and-drop-extra-interface-openi.patch \
    file://0001-xen-when-unplugging-emulated-devices-skip-virtio-dev.patch \
    file://0002-xen-add-pseudo-RAM-region-for-grant-mappings.patch \
    file://0003-softmmu-let-qemu_map_ram_ptr-use-qemu_ram_ptr_length.patch \
    file://0004-xen-let-xen_ram_addr_from_mapcache-return-1-in-case-.patch \
    file://0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch \
    file://0006-xen-add-map-and-unmap-callbacks-for-grant-region.patch \
    file://0007-xen-mapcache-Fix-build-on-Arm.patch \
    file://0008-hw-arm-Add-grant-mapping.patch \
    "

FILESEXTRAPATHS:prepend:class-target := "${THISDIR}/qemu:"

SRC_URI .= "${@bb.utils.contains('DISTRO_FEATURES', 'xen', '${SRC_URI_XEN}', '', d)}"
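The net effect of the include above: the Xen patch stack is appended to SRC_URI only for target builds of a distro that enables the 'xen' feature; native and nativesdk builds fetch an unpatched QEMU. A hedged sketch of how one might enable and verify this (configuration and command are illustrative, not part of this commit):

    # conf/local.conf (illustrative)
    DISTRO_FEATURES:append = " xen"

    # The patches should then appear in the recipe's resolved SRC_URI:
    #   bitbake -e qemu-xilinx | grep '^SRC_URI='
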
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-native_%.bbappend b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-native_%.bbappend
new file mode 100644
index 00000000..e84844cf
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-native_%.bbappend
@@ -0,0 +1 @@
require qemu-tpm.inc
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-system-native_%.bbappend b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-system-native_%.bbappend
new file mode 100644
index 00000000..e84844cf
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx-system-native_%.bbappend
@@ -0,0 +1 @@
require qemu-tpm.inc
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx_%.bbappend b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx_%.bbappend
new file mode 100644
index 00000000..3e93710c
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu-xilinx_%.bbappend
@@ -0,0 +1,5 @@
require qemu-tpm.inc
require qemu-xen.inc

# We do not want QEMU on the target to be configured with OpenGL
PACKAGECONFIG:remove:class-target:petalinux = "virglrenderer epoxy gtk+"
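For context, the ':petalinux' suffix scopes the removal to builds where 'petalinux' appears in OVERRIDES; a commented sketch of the intent (the distro setting shown is an assumption for illustration):

    # e.g. with DISTRO = "petalinux" in the distro or local configuration,
    # target builds of qemu-xilinx drop the virglrenderer, epoxy and gtk+
    # PACKAGECONFIGs, leaving QEMU on the target without OpenGL/GTK display
    # support; other distros keep their existing PACKAGECONFIG.
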
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen-pt-fix-syntax-error-that-causes-FTBFS-in-some-co.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen-pt-fix-syntax-error-that-causes-FTBFS-in-some-co.patch
new file mode 100644
index 00000000..99eaeeaf
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen-pt-fix-syntax-error-that-causes-FTBFS-in-some-co.patch
@@ -0,0 +1,40 @@
1From ba24456b93a205b728475d5f0880f3ec495e383a Mon Sep 17 00:00:00 2001
2From: Chuck Zmudzinski <brchuckz@aol.com>
3Date: Mon, 31 Oct 2022 17:35:52 -0400
4Subject: [PATCH] xen/pt: fix syntax error that causes FTBFS in some
5 configurations
6MIME-Version: 1.0
7Content-Type: text/plain; charset=UTF-8
8Content-Transfer-Encoding: 8bit
9
10When Qemu is built with --enable-xen and --disable-xen-pci-passthrough
11and the target os is linux, the build fails with:
12
13meson.build:3477:2: ERROR: File xen_pt_stub.c does not exist.
14
15Fixes: 582ea95f5f93 ("meson: convert hw/xen")
16
17Signed-off-by: Chuck Zmudzinski <brchuckz@aol.com>
18Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
19Message-Id: <5f1342a13c09af77b1a7b0aeaba5955bcea89731.1667242033.git.brchuckz@aol.com>
20Signed-off-by: Laurent Vivier <laurent@vivier.eu>
21---
22 hw/xen/meson.build | 2 +-
23 1 file changed, 1 insertion(+), 1 deletion(-)
24
25diff --git a/hw/xen/meson.build b/hw/xen/meson.build
26index 08dc1f6857..ae0ace3046 100644
27--- a/hw/xen/meson.build
28+++ b/hw/xen/meson.build
29@@ -18,7 +18,7 @@ if have_xen_pci_passthrough
30 'xen_pt_msi.c',
31 ))
32 else
33- xen_specific_ss.add('xen_pt_stub.c')
34+ xen_specific_ss.add(files('xen_pt_stub.c'))
35 endif
36
37 specific_ss.add_all(when: ['CONFIG_XEN', xen], if_true: xen_specific_ss)
38--
392.17.0
40
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen-when-unplugging-emulated-devices-skip-virtio-dev.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen-when-unplugging-emulated-devices-skip-virtio-dev.patch
new file mode 100644
index 00000000..71dfb3be
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen-when-unplugging-emulated-devices-skip-virtio-dev.patch
@@ -0,0 +1,51 @@
1From e2b85efc82bc26a838f666c8282528ee38cf6377 Mon Sep 17 00:00:00 2001
2From: Juergen Gross <jgross@suse.com>
3Date: Tue, 16 Mar 2021 14:00:33 +0100
4Subject: [PATCH 1/8] xen: when unplugging emulated devices skip virtio devices
5
6Virtio devices should never be unplugged at boot time, as they are
7similar to pci passthrough devices.
8
9Signed-off-by: Juergen Gross <jgross@suse.com>
10Acked-by: Stefano Stabellini <stefano.stabellini@amd.com>
11---
12 hw/i386/xen/xen_platform.c | 9 ++++++++-
13 1 file changed, 8 insertions(+), 1 deletion(-)
14
15diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c
16index a64265cca0..39bbb12675 100644
17--- a/hw/i386/xen/xen_platform.c
18+++ b/hw/i386/xen/xen_platform.c
19@@ -30,6 +30,7 @@
20 #include "hw/pci/pci.h"
21 #include "hw/xen/xen_common.h"
22 #include "migration/vmstate.h"
23+#include "hw/virtio/virtio-bus.h"
24 #include "hw/xen/xen-legacy-backend.h"
25 #include "trace.h"
26 #include "sysemu/xen.h"
27@@ -114,7 +115,8 @@ static void unplug_nic(PCIBus *b, PCIDevice *d, void *o)
28 /* We have to ignore passthrough devices */
29 if (pci_get_word(d->config + PCI_CLASS_DEVICE) ==
30 PCI_CLASS_NETWORK_ETHERNET
31- && strcmp(d->name, "xen-pci-passthrough") != 0) {
32+ && strcmp(d->name, "xen-pci-passthrough") != 0
33+ && !qdev_get_child_bus(&d->qdev, TYPE_VIRTIO_BUS)) {
34 object_unparent(OBJECT(d));
35 }
36 }
37@@ -191,6 +193,11 @@ static void unplug_disks(PCIBus *b, PCIDevice *d, void *opaque)
38 return;
39 }
40
41+ /* Ignore virtio devices */
42+ if (qdev_get_child_bus(&d->qdev, TYPE_VIRTIO_BUS)) {
43+ return;
44+ }
45+
46 switch (pci_get_word(d->config + PCI_CLASS_DEVICE)) {
47 case PCI_CLASS_STORAGE_IDE:
48 pci_xen_ide_unplug(DEVICE(d), aux);
49--
502.25.1
51
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen_common-return-error-from-xen_create_ioreq_server.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen_common-return-error-from-xen_create_ioreq_server.patch
new file mode 100644
index 00000000..d4349b1d
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0001-xen_common-return-error-from-xen_create_ioreq_server.patch
@@ -0,0 +1,55 @@
1From ef4d512aff004c62d550cdd64329c6c1acd0f217 Mon Sep 17 00:00:00 2001
2From: Stefano Stabellini <stefano.stabellini@amd.com>
3Date: Fri, 1 Jul 2022 18:48:03 -0700
4Subject: [PATCH 01/16] xen_common: return error from xen_create_ioreq_server
5
6Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
7---
8 include/hw/xen/xen_common.h | 12 +++++++-----
9 1 file changed, 7 insertions(+), 5 deletions(-)
10
11diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h
12index 77ce17d8a4..c2d2f36bde 100644
13--- a/include/hw/xen/xen_common.h
14+++ b/include/hw/xen/xen_common.h
15@@ -467,8 +467,8 @@ static inline void xen_unmap_pcidev(domid_t dom,
16 {
17 }
18
19-static inline void xen_create_ioreq_server(domid_t dom,
20- ioservid_t *ioservid)
21+static inline int xen_create_ioreq_server(domid_t dom,
22+ ioservid_t *ioservid)
23 {
24 }
25
26@@ -600,8 +600,8 @@ static inline void xen_unmap_pcidev(domid_t dom,
27 PCI_FUNC(pci_dev->devfn));
28 }
29
30-static inline void xen_create_ioreq_server(domid_t dom,
31- ioservid_t *ioservid)
32+static inline int xen_create_ioreq_server(domid_t dom,
33+ ioservid_t *ioservid)
34 {
35 int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
36 HVM_IOREQSRV_BUFIOREQ_ATOMIC,
37@@ -609,12 +609,14 @@ static inline void xen_create_ioreq_server(domid_t dom,
38
39 if (rc == 0) {
40 trace_xen_ioreq_server_create(*ioservid);
41- return;
42+ return rc;
43 }
44
45 *ioservid = 0;
46 use_default_ioreq_server = true;
47 trace_xen_default_ioreq_server();
48+
49+ return rc;
50 }
51
52 static inline void xen_destroy_ioreq_server(domid_t dom,
53--
542.17.1
55
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0002-xen-add-pseudo-RAM-region-for-grant-mappings.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0002-xen-add-pseudo-RAM-region-for-grant-mappings.patch
new file mode 100644
index 00000000..8facb189
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0002-xen-add-pseudo-RAM-region-for-grant-mappings.patch
@@ -0,0 +1,252 @@
1From e18daac2f6d3f60c8217f44189a91cf8240a591f Mon Sep 17 00:00:00 2001
2From: Juergen Gross <jgross@suse.com>
3Date: Thu, 20 May 2021 11:19:58 +0200
4Subject: [PATCH 2/8] xen: add pseudo RAM region for grant mappings
5
6Add a memory region which can be used to automatically map granted
7memory. It is starting at 0x8000000000000000ULL in order to be able to
8distinguish it from normal RAM.
9
10For this reason the xen.ram memory region is expanded, which has no
11further impact as it is used just as a container of the real RAM
12regions and now the grant region.
13
14Signed-off-by: Juergen Gross <jgross@suse.com>
15Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
16Acked-by: Stefano Stabellini <stefano.stabellini@amd.com>
17---
18 hw/i386/xen/xen-hvm.c | 3 ++
19 hw/xen/xen-hvm-common.c | 4 +--
20 hw/xen/xen-mapcache.c | 28 +++++++++++++++
21 include/exec/ram_addr.h | 1 +
22 include/hw/xen/xen-hvm-common.h | 2 ++
23 include/hw/xen/xen_pvdev.h | 3 ++
24 include/sysemu/xen-mapcache.h | 3 ++
25 softmmu/physmem.c | 61 ++++++++++++++++++++-------------
26 8 files changed, 80 insertions(+), 25 deletions(-)
27
28diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
29index 36d87555a9..2dcc2e1179 100644
30--- a/hw/i386/xen/xen-hvm.c
31+++ b/hw/i386/xen/xen-hvm.c
32@@ -171,6 +171,9 @@ static void xen_ram_init(PCMachineState *pcms,
33 x86ms->above_4g_mem_size);
34 memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
35 }
36+
37+ /* Add grant mappings as a pseudo RAM region. */
38+ ram_grants = *xen_init_grant_ram();
39 }
40
41 static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
42diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
43index 7e7d23397f..abd6e379d3 100644
44--- a/hw/xen/xen-hvm-common.c
45+++ b/hw/xen/xen-hvm-common.c
46@@ -10,7 +10,7 @@
47 #include "hw/boards.h"
48 #include "hw/xen/arch_hvm.h"
49
50-MemoryRegion ram_memory;
51+MemoryRegion ram_memory, ram_grants;
52
53 MemoryListener xen_io_listener = {
54 .name = "xen-io",
55@@ -742,7 +742,7 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
56 return;
57 }
58
59- if (mr == &ram_memory) {
60+ if (mr == &ram_memory || mr == &ram_grants) {
61 return;
62 }
63
64diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
65index a2f93096e7..0b75f1633a 100644
66--- a/hw/xen/xen-mapcache.c
67+++ b/hw/xen/xen-mapcache.c
68@@ -14,7 +14,10 @@
69
70 #include <sys/resource.h>
71
72+#include "hw/xen/xen-hvm-common.h"
73 #include "hw/xen/xen-legacy-backend.h"
74+#include "hw/xen/xen_pvdev.h"
75+
76 #include "qemu/bitmap.h"
77
78 #include "sysemu/runstate.h"
79@@ -597,3 +600,28 @@ uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
80 mapcache_unlock();
81 return p;
82 }
83+
84+MemoryRegion *xen_init_grant_ram(void)
85+{
86+ RAMBlock *block;
87+
88+ memory_region_init(&ram_grants, NULL, "xen.grants",
89+ XEN_MAX_VIRTIO_GRANTS * XC_PAGE_SIZE);
90+ block = g_malloc0(sizeof(*block));
91+ block->mr = &ram_grants;
92+ block->used_length = XEN_MAX_VIRTIO_GRANTS * XC_PAGE_SIZE;
93+ block->max_length = XEN_MAX_VIRTIO_GRANTS * XC_PAGE_SIZE;
94+ block->fd = -1;
95+ block->page_size = XC_PAGE_SIZE;
96+ block->host = (void *)XEN_GRANT_ADDR_OFF;
97+ block->offset = XEN_GRANT_ADDR_OFF;
98+ block->flags = RAM_PREALLOC;
99+ ram_grants.ram_block = block;
100+ ram_grants.ram = true;
101+ ram_grants.terminates = true;
102+ ram_block_add_list(block);
103+ memory_region_add_subregion(get_system_memory(), XEN_GRANT_ADDR_OFF,
104+ &ram_grants);
105+
106+ return &ram_grants;
107+}
108diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
109index f3e0c78161..e60b055867 100644
110--- a/include/exec/ram_addr.h
111+++ b/include/exec/ram_addr.h
112@@ -137,6 +137,7 @@ void qemu_ram_free(RAMBlock *block);
113 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);
114
115 void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);
116+void ram_block_add_list(RAMBlock *new_block);
117
118 /* Clear whole block of mem */
119 static inline void qemu_ram_block_writeback(RAMBlock *block)
120diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h
121index 2979f84ee2..6f7cc05d38 100644
122--- a/include/hw/xen/xen-hvm-common.h
123+++ b/include/hw/xen/xen-hvm-common.h
124@@ -16,6 +16,8 @@
125 #include <xen/hvm/ioreq.h>
126
127 extern MemoryRegion ram_memory;
128+
129+extern MemoryRegion ram_grants;
130 extern MemoryListener xen_io_listener;
131 extern DeviceListener xen_device_listener;
132
133diff --git a/include/hw/xen/xen_pvdev.h b/include/hw/xen/xen_pvdev.h
134index 7cd4bc2b82..36cd3ec1d4 100644
135--- a/include/hw/xen/xen_pvdev.h
136+++ b/include/hw/xen/xen_pvdev.h
137@@ -78,4 +78,7 @@ int xen_pv_send_notify(struct XenLegacyDevice *xendev);
138 void xen_pv_printf(struct XenLegacyDevice *xendev, int msg_level,
139 const char *fmt, ...) G_GNUC_PRINTF(3, 4);
140
141+#define XEN_GRANT_ADDR_OFF 0x8000000000000000ULL
142+#define XEN_MAX_VIRTIO_GRANTS 65536
143+
144 #endif /* QEMU_HW_XEN_PVDEV_H */
145diff --git a/include/sysemu/xen-mapcache.h b/include/sysemu/xen-mapcache.h
146index c8e7c2f6cf..f4bedb1c11 100644
147--- a/include/sysemu/xen-mapcache.h
148+++ b/include/sysemu/xen-mapcache.h
149@@ -10,6 +10,7 @@
150 #define XEN_MAPCACHE_H
151
152 #include "exec/cpu-common.h"
153+#include "exec/ram_addr.h"
154
155 typedef hwaddr (*phys_offset_to_gaddr_t)(hwaddr phys_offset,
156 ram_addr_t size);
157@@ -25,6 +26,8 @@ void xen_invalidate_map_cache(void);
158 uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
159 hwaddr new_phys_addr,
160 hwaddr size);
161+MemoryRegion *xen_init_grant_ram(void);
162+
163 #else
164
165 static inline void xen_map_cache_init(phys_offset_to_gaddr_t f,
166diff --git a/softmmu/physmem.c b/softmmu/physmem.c
167index dc3c3e5f2e..63ba5f7495 100644
168--- a/softmmu/physmem.c
169+++ b/softmmu/physmem.c
170@@ -1971,12 +1971,46 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
171 }
172 }
173
174+static void ram_block_add_list_locked(RAMBlock *new_block)
175+ {
176+ RAMBlock *block;
177+ RAMBlock *last_block = NULL;
178+
179+ /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
180+ * QLIST (which has an RCU-friendly variant) does not have insertion at
181+ * tail, so save the last element in last_block.
182+ */
183+ RAMBLOCK_FOREACH(block) {
184+ last_block = block;
185+ if (block->max_length < new_block->max_length) {
186+ break;
187+ }
188+ }
189+ if (block) {
190+ QLIST_INSERT_BEFORE_RCU(block, new_block, next);
191+ } else if (last_block) {
192+ QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
193+ } else { /* list is empty */
194+ QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
195+ }
196+ ram_list.mru_block = NULL;
197+
198+ /* Write list before version */
199+ smp_wmb();
200+ ram_list.version++;
201+}
202+
203+void ram_block_add_list(RAMBlock *new_block)
204+{
205+ qemu_mutex_lock_ramlist();
206+ ram_block_add_list_locked(new_block);
207+ qemu_mutex_unlock_ramlist();
208+}
209+
210 static void ram_block_add(RAMBlock *new_block, Error **errp)
211 {
212 const bool noreserve = qemu_ram_is_noreserve(new_block);
213 const bool shared = qemu_ram_is_shared(new_block);
214- RAMBlock *block;
215- RAMBlock *last_block = NULL;
216 ram_addr_t old_ram_size, new_ram_size;
217 Error *err = NULL;
218
219@@ -2014,28 +2048,9 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
220 if (new_ram_size > old_ram_size) {
221 dirty_memory_extend(old_ram_size, new_ram_size);
222 }
223- /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
224- * QLIST (which has an RCU-friendly variant) does not have insertion at
225- * tail, so save the last element in last_block.
226- */
227- RAMBLOCK_FOREACH(block) {
228- last_block = block;
229- if (block->max_length < new_block->max_length) {
230- break;
231- }
232- }
233- if (block) {
234- QLIST_INSERT_BEFORE_RCU(block, new_block, next);
235- } else if (last_block) {
236- QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
237- } else { /* list is empty */
238- QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
239- }
240- ram_list.mru_block = NULL;
241
242- /* Write list before version */
243- smp_wmb();
244- ram_list.version++;
245+ ram_block_add_list_locked(new_block);
246+
247 qemu_mutex_unlock_ramlist();
248
249 cpu_physical_memory_set_dirty_range(new_block->offset,
250--
2512.25.1
252
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0002-xen-mapcache-move-xen-mapcache.c-to-hw-xen.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0002-xen-mapcache-move-xen-mapcache.c-to-hw-xen.patch
new file mode 100644
index 00000000..35ca6df4
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0002-xen-mapcache-move-xen-mapcache.c-to-hw-xen.patch
@@ -0,0 +1,88 @@
1From 423468bdb3728154e95af18ef755bc75c5d59a3a Mon Sep 17 00:00:00 2001
2From: Vikram Garhwal <vikram.garhwal@amd.com>
3Date: Thu, 30 Jun 2022 18:19:50 -0700
4Subject: [PATCH 02/16] xen-mapcache: move xen-mapcache.c to hw/xen
5
6xen-mapcache.c contains common functions which are useful for Xen on ARM
7IOREQ handling. Moving it out of i386 to hw/xen for common access.
8
9Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
10Reviewed-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
11Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
12---
13 hw/i386/meson.build | 1 +
14 hw/i386/xen/meson.build | 1 -
15 hw/i386/xen/trace-events | 5 -----
16 hw/xen/meson.build | 4 ++++
17 hw/xen/trace-events | 5 +++++
18 hw/{i386 => }/xen/xen-mapcache.c | 0
19 6 files changed, 10 insertions(+), 6 deletions(-)
20 rename hw/{i386 => }/xen/xen-mapcache.c (100%)
21
22diff --git a/hw/i386/meson.build b/hw/i386/meson.build
23index 213e2e82b3..cfdbfdcbcb 100644
24--- a/hw/i386/meson.build
25+++ b/hw/i386/meson.build
26@@ -33,5 +33,6 @@ subdir('kvm')
27 subdir('xen')
28
29 i386_ss.add_all(xenpv_ss)
30+i386_ss.add_all(xen_ss)
31
32 hw_arch += {'i386': i386_ss}
33diff --git a/hw/i386/xen/meson.build b/hw/i386/xen/meson.build
34index be84130300..2fcc46e6ca 100644
35--- a/hw/i386/xen/meson.build
36+++ b/hw/i386/xen/meson.build
37@@ -1,6 +1,5 @@
38 i386_ss.add(when: 'CONFIG_XEN', if_true: files(
39 'xen-hvm.c',
40- 'xen-mapcache.c',
41 'xen_apic.c',
42 'xen_platform.c',
43 'xen_pvdevice.c',
44diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events
45index 5d6be61090..a0c89d91c4 100644
46--- a/hw/i386/xen/trace-events
47+++ b/hw/i386/xen/trace-events
48@@ -21,8 +21,3 @@ xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p"
49 cpu_ioreq_config_read(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
50 cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
51
52-# xen-mapcache.c
53-xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64
54-xen_remap_bucket(uint64_t index) "index 0x%"PRIx64
55-xen_map_cache_return(void* ptr) "%p"
56-
57diff --git a/hw/xen/meson.build b/hw/xen/meson.build
58index ae0ace3046..19d0637c46 100644
59--- a/hw/xen/meson.build
60+++ b/hw/xen/meson.build
61@@ -22,3 +22,7 @@ else
62 endif
63
64 specific_ss.add_all(when: ['CONFIG_XEN', xen], if_true: xen_specific_ss)
65+
66+xen_ss = ss.source_set()
67+
68+xen_ss.add(when: 'CONFIG_XEN', if_true: files('xen-mapcache.c'))
69diff --git a/hw/xen/trace-events b/hw/xen/trace-events
70index 3da3fd8348..2c8f238f42 100644
71--- a/hw/xen/trace-events
72+++ b/hw/xen/trace-events
73@@ -41,3 +41,8 @@ xs_node_vprintf(char *path, char *value) "%s %s"
74 xs_node_vscanf(char *path, char *value) "%s %s"
75 xs_node_watch(char *path) "%s"
76 xs_node_unwatch(char *path) "%s"
77+
78+# xen-mapcache.c
79+xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64
80+xen_remap_bucket(uint64_t index) "index 0x%"PRIx64
81+xen_map_cache_return(void* ptr) "%p"
82diff --git a/hw/i386/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
83similarity index 100%
84rename from hw/i386/xen/xen-mapcache.c
85rename to hw/xen/xen-mapcache.c
86--
872.17.1
88
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0003-hw-i386-xen-rearrange-xen_hvm_init_pc.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0003-hw-i386-xen-rearrange-xen_hvm_init_pc.patch
new file mode 100644
index 00000000..1113cf39
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0003-hw-i386-xen-rearrange-xen_hvm_init_pc.patch
@@ -0,0 +1,106 @@
1From 4472924c800e9dbf46e4c2432565d3e406b35d27 Mon Sep 17 00:00:00 2001
2From: Vikram Garhwal <vikram.garhwal@amd.com>
3Date: Fri, 1 Jul 2022 16:32:33 -0700
4Subject: [PATCH 03/16] hw/i386/xen: rearrange xen_hvm_init_pc
5
6Move references to:
7- xen_get_vmport_regs_pfn
8- xen_suspend_notifier
9- xen_wakeup_notifier
10- xen_ram_init
11
12towards the end of the function. This is done to keep the common
13ioreq functions in one place which will be moved to new function in next
14patch in order to make it useful to ARM machines also.
15
16Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
17Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
18Reviewed-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
19---
20 hw/i386/xen/xen-hvm.c | 49 ++++++++++++++++++++++---------------------
21 1 file changed, 25 insertions(+), 24 deletions(-)
22
23diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
24index e4293d6d66..b27484ad22 100644
25--- a/hw/i386/xen/xen-hvm.c
26+++ b/hw/i386/xen/xen-hvm.c
27@@ -1416,12 +1416,6 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
28 state->exit.notify = xen_exit_notifier;
29 qemu_add_exit_notifier(&state->exit);
30
31- state->suspend.notify = xen_suspend_notifier;
32- qemu_register_suspend_notifier(&state->suspend);
33-
34- state->wakeup.notify = xen_wakeup_notifier;
35- qemu_register_wakeup_notifier(&state->wakeup);
36-
37 /*
38 * Register wake-up support in QMP query-current-machine API
39 */
40@@ -1432,23 +1426,6 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
41 goto err;
42 }
43
44- rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
45- if (!rc) {
46- DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
47- state->shared_vmport_page =
48- xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
49- 1, &ioreq_pfn, NULL);
50- if (state->shared_vmport_page == NULL) {
51- error_report("map shared vmport IO page returned error %d handle=%p",
52- errno, xen_xc);
53- goto err;
54- }
55- } else if (rc != -ENOSYS) {
56- error_report("get vmport regs pfn returned error %d, rc=%d",
57- errno, rc);
58- goto err;
59- }
60-
61 /* Note: cpus is empty at this point in init */
62 state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus);
63
64@@ -1486,7 +1463,6 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
65 #else
66 xen_map_cache_init(NULL, state);
67 #endif
68- xen_ram_init(pcms, ms->ram_size, ram_memory);
69
70 qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
71
72@@ -1513,6 +1489,31 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
73 QLIST_INIT(&xen_physmap);
74 xen_read_physmap(state);
75
76+ state->suspend.notify = xen_suspend_notifier;
77+ qemu_register_suspend_notifier(&state->suspend);
78+
79+ state->wakeup.notify = xen_wakeup_notifier;
80+ qemu_register_wakeup_notifier(&state->wakeup);
81+
82+ rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
83+ if (!rc) {
84+ DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
85+ state->shared_vmport_page =
86+ xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
87+ 1, &ioreq_pfn, NULL);
88+ if (state->shared_vmport_page == NULL) {
89+ error_report("map shared vmport IO page returned error %d handle=%p",
90+ errno, xen_xc);
91+ goto err;
92+ }
93+ } else if (rc != -ENOSYS) {
94+ error_report("get vmport regs pfn returned error %d, rc=%d",
95+ errno, rc);
96+ goto err;
97+ }
98+
99+ xen_ram_init(pcms, ms->ram_size, ram_memory);
100+
101 /* Disable ACPI build because Xen handles it */
102 pcms->acpi_build_enabled = false;
103
104--
1052.17.1
106
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0003-softmmu-let-qemu_map_ram_ptr-use-qemu_ram_ptr_length.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0003-softmmu-let-qemu_map_ram_ptr-use-qemu_ram_ptr_length.patch
new file mode 100644
index 00000000..bff815bc
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0003-softmmu-let-qemu_map_ram_ptr-use-qemu_ram_ptr_length.patch
@@ -0,0 +1,113 @@
1From cb4be1f7185c5974523c764f3f6efe3af6633d71 Mon Sep 17 00:00:00 2001
2From: Juergen Gross <jgross@suse.com>
3Date: Thu, 20 May 2021 11:54:48 +0200
4Subject: [PATCH 3/8] softmmu: let qemu_map_ram_ptr() use qemu_ram_ptr_length()
5
6qemu_map_ram_ptr() and qemu_ram_ptr_length() share quite some code, so
7modify qemu_ram_ptr_length() a little bit and use it for
8qemu_map_ram_ptr(), too.
9
10Signed-off-by: Juergen Gross <jgross@suse.com>
11Acked-by: Stefano Stabellini <stefano.stabellini@amd.com>
12---
13 softmmu/physmem.c | 56 ++++++++++++++++++-----------------------------
14 1 file changed, 21 insertions(+), 35 deletions(-)
15
16diff --git a/softmmu/physmem.c b/softmmu/physmem.c
17index 63ba5f7495..439a53a1be 100644
18--- a/softmmu/physmem.c
19+++ b/softmmu/physmem.c
20@@ -2306,38 +2306,7 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
21 }
22 #endif /* !_WIN32 */
23
24-/* Return a host pointer to ram allocated with qemu_ram_alloc.
25- * This should not be used for general purpose DMA. Use address_space_map
26- * or address_space_rw instead. For local memory (e.g. video ram) that the
27- * device owns, use memory_region_get_ram_ptr.
28- *
29- * Called within RCU critical section.
30- */
31-void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
32-{
33- RAMBlock *block = ram_block;
34-
35- if (block == NULL) {
36- block = qemu_get_ram_block(addr);
37- addr -= block->offset;
38- }
39-
40- if (xen_enabled() && block->host == NULL) {
41- /* We need to check if the requested address is in the RAM
42- * because we don't want to map the entire memory in QEMU.
43- * In that case just map until the end of the page.
44- */
45- if (block->offset == 0) {
46- return xen_map_cache(addr, 0, 0, false);
47- }
48-
49- block->host = xen_map_cache(block->offset, block->max_length, 1, false);
50- }
51- return ramblock_ptr(block, addr);
52-}
53-
54-/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
55- * but takes a size argument.
56+/* Return a host pointer to guest's ram.
57 *
58 * Called within RCU critical section.
59 */
60@@ -2345,7 +2314,9 @@ static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
61 hwaddr *size, bool lock)
62 {
63 RAMBlock *block = ram_block;
64- if (*size == 0) {
65+ hwaddr len = 0;
66+
67+ if (size && *size == 0) {
68 return NULL;
69 }
70
71@@ -2353,7 +2324,10 @@ static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
72 block = qemu_get_ram_block(addr);
73 addr -= block->offset;
74 }
75- *size = MIN(*size, block->max_length - addr);
76+ if (size) {
77+ *size = MIN(*size, block->max_length - addr);
78+ len = *size;
79+ }
80
81 if (xen_enabled() && block->host == NULL) {
82 /* We need to check if the requested address is in the RAM
83@@ -2361,7 +2335,7 @@ static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
84 * In that case just map the requested area.
85 */
86 if (block->offset == 0) {
87- return xen_map_cache(addr, *size, lock, lock);
88+ return xen_map_cache(addr, len, lock, lock);
89 }
90
91 block->host = xen_map_cache(block->offset, block->max_length, 1, lock);
92@@ -2370,6 +2344,18 @@ static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
93 return ramblock_ptr(block, addr);
94 }
95
96+/* Return a host pointer to ram allocated with qemu_ram_alloc.
97+ * This should not be used for general purpose DMA. Use address_space_map
98+ * or address_space_rw instead. For local memory (e.g. video ram) that the
99+ * device owns, use memory_region_get_ram_ptr.
100+ *
101+ * Called within RCU critical section.
102+ */
103+void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
104+{
105+ return qemu_ram_ptr_length(ram_block, addr, NULL, false);
106+}
107+
108 /* Return the offset of a hostpointer within a ramblock */
109 ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host)
110 {
111--
1122.25.1
113
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0004-xen-hvm-move-x86-specific-fields-out-of-XenIOState.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0004-xen-hvm-move-x86-specific-fields-out-of-XenIOState.patch
new file mode 100644
index 00000000..4337e0c8
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0004-xen-hvm-move-x86-specific-fields-out-of-XenIOState.patch
@@ -0,0 +1,180 @@
1From 2a01fa06d267f68148d3a6df50675edfe090601a Mon Sep 17 00:00:00 2001
2From: Stefano Stabellini <stefano.stabellini@amd.com>
3Date: Fri, 1 Jul 2022 18:16:52 -0700
4Subject: [PATCH 04/16] xen-hvm: move x86-specific fields out of XenIOState
5
6Move:
7- shared_vmport_page
8- log_for_dirtybit
9- dirty_bitmap
10- suspend
11- wakeup
12
13out of XenIOState as they are only used on x86, especially the ones
14related to dirty logging.
15
16Remove free_phys_offset that was unused.
17
18Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
19---
20 hw/i386/xen/xen-hvm.c | 58 ++++++++++++++++++++-----------------------
21 1 file changed, 27 insertions(+), 31 deletions(-)
22
23diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
24index b27484ad22..225cfdf8b7 100644
25--- a/hw/i386/xen/xen-hvm.c
26+++ b/hw/i386/xen/xen-hvm.c
27@@ -73,6 +73,7 @@ struct shared_vmport_iopage {
28 };
29 typedef struct shared_vmport_iopage shared_vmport_iopage_t;
30 #endif
31+static shared_vmport_iopage_t *shared_vmport_page;
32
33 static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
34 {
35@@ -95,6 +96,11 @@ typedef struct XenPhysmap {
36 } XenPhysmap;
37
38 static QLIST_HEAD(, XenPhysmap) xen_physmap;
39+static const XenPhysmap *log_for_dirtybit = NULL;
40+/* Buffer used by xen_sync_dirty_bitmap */
41+static unsigned long *dirty_bitmap = NULL;
42+static Notifier suspend;
43+static Notifier wakeup;
44
45 typedef struct XenPciDevice {
46 PCIDevice *pci_dev;
47@@ -105,7 +111,6 @@ typedef struct XenPciDevice {
48 typedef struct XenIOState {
49 ioservid_t ioservid;
50 shared_iopage_t *shared_page;
51- shared_vmport_iopage_t *shared_vmport_page;
52 buffered_iopage_t *buffered_io_page;
53 xenforeignmemory_resource_handle *fres;
54 QEMUTimer *buffered_io_timer;
55@@ -125,14 +130,8 @@ typedef struct XenIOState {
56 MemoryListener io_listener;
57 QLIST_HEAD(, XenPciDevice) dev_list;
58 DeviceListener device_listener;
59- hwaddr free_phys_offset;
60- const XenPhysmap *log_for_dirtybit;
61- /* Buffer used by xen_sync_dirty_bitmap */
62- unsigned long *dirty_bitmap;
63
64 Notifier exit;
65- Notifier suspend;
66- Notifier wakeup;
67 } XenIOState;
68
69 /* Xen specific function for piix pci */
70@@ -462,10 +461,10 @@ static int xen_remove_from_physmap(XenIOState *state,
71 }
72
73 QLIST_REMOVE(physmap, list);
74- if (state->log_for_dirtybit == physmap) {
75- state->log_for_dirtybit = NULL;
76- g_free(state->dirty_bitmap);
77- state->dirty_bitmap = NULL;
78+ if (log_for_dirtybit == physmap) {
79+ log_for_dirtybit = NULL;
80+ g_free(dirty_bitmap);
81+ dirty_bitmap = NULL;
82 }
83 g_free(physmap);
84
85@@ -626,16 +625,16 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
86 return;
87 }
88
89- if (state->log_for_dirtybit == NULL) {
90- state->log_for_dirtybit = physmap;
91- state->dirty_bitmap = g_new(unsigned long, bitmap_size);
92- } else if (state->log_for_dirtybit != physmap) {
93+ if (log_for_dirtybit == NULL) {
94+ log_for_dirtybit = physmap;
95+ dirty_bitmap = g_new(unsigned long, bitmap_size);
96+ } else if (log_for_dirtybit != physmap) {
97 /* Only one range for dirty bitmap can be tracked. */
98 return;
99 }
100
101 rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
102- npages, state->dirty_bitmap);
103+ npages, dirty_bitmap);
104 if (rc < 0) {
105 #ifndef ENODATA
106 #define ENODATA ENOENT
107@@ -650,7 +649,7 @@ static void xen_sync_dirty_bitmap(XenIOState *state,
108 }
109
110 for (i = 0; i < bitmap_size; i++) {
111- unsigned long map = state->dirty_bitmap[i];
112+ unsigned long map = dirty_bitmap[i];
113 while (map != 0) {
114 j = ctzl(map);
115 map &= ~(1ul << j);
116@@ -676,12 +675,10 @@ static void xen_log_start(MemoryListener *listener,
117 static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
118 int old, int new)
119 {
120- XenIOState *state = container_of(listener, XenIOState, memory_listener);
121-
122 if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
123- state->log_for_dirtybit = NULL;
124- g_free(state->dirty_bitmap);
125- state->dirty_bitmap = NULL;
126+ log_for_dirtybit = NULL;
127+ g_free(dirty_bitmap);
128+ dirty_bitmap = NULL;
129 /* Disable dirty bit tracking */
130 xen_track_dirty_vram(xen_domid, 0, 0, NULL);
131 }
132@@ -1021,9 +1018,9 @@ static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
133 {
134 vmware_regs_t *vmport_regs;
135
136- assert(state->shared_vmport_page);
137+ assert(shared_vmport_page);
138 vmport_regs =
139- &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
140+ &shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
141 QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));
142
143 current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
144@@ -1468,7 +1465,6 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
145
146 state->memory_listener = xen_memory_listener;
147 memory_listener_register(&state->memory_listener, &address_space_memory);
148- state->log_for_dirtybit = NULL;
149
150 state->io_listener = xen_io_listener;
151 memory_listener_register(&state->io_listener, &address_space_io);
152@@ -1489,19 +1485,19 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
153 QLIST_INIT(&xen_physmap);
154 xen_read_physmap(state);
155
156- state->suspend.notify = xen_suspend_notifier;
157- qemu_register_suspend_notifier(&state->suspend);
158+ suspend.notify = xen_suspend_notifier;
159+ qemu_register_suspend_notifier(&suspend);
160
161- state->wakeup.notify = xen_wakeup_notifier;
162- qemu_register_wakeup_notifier(&state->wakeup);
163+ wakeup.notify = xen_wakeup_notifier;
164+ qemu_register_wakeup_notifier(&wakeup);
165
166 rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
167 if (!rc) {
168 DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
169- state->shared_vmport_page =
170+ shared_vmport_page =
171 xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
172 1, &ioreq_pfn, NULL);
173- if (state->shared_vmport_page == NULL) {
174+ if (shared_vmport_page == NULL) {
175 error_report("map shared vmport IO page returned error %d handle=%p",
176 errno, xen_xc);
177 goto err;
178--
1792.17.1
180
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0004-xen-let-xen_ram_addr_from_mapcache-return-1-in-case-.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0004-xen-let-xen_ram_addr_from_mapcache-return-1-in-case-.patch
new file mode 100644
index 00000000..25dc0ae0
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0004-xen-let-xen_ram_addr_from_mapcache-return-1-in-case-.patch
@@ -0,0 +1,49 @@
1From 7dfa8828bd2e61fc5bf2bf6294aad16b2bf4ff8a Mon Sep 17 00:00:00 2001
2From: Juergen Gross <jgross@suse.com>
3Date: Thu, 20 May 2021 13:31:32 +0200
4Subject: [PATCH 4/8] xen: let xen_ram_addr_from_mapcache() return -1 in case
5 of not found entry
6
7Today xen_ram_addr_from_mapcache() will either abort() or return 0 in
8case it can't find a matching entry for a pointer value. Both cases
9are bad, so change that to return an invalid address instead.
10
11Signed-off-by: Juergen Gross <jgross@suse.com>
12Acked-by: Stefano Stabellini <stefano.stabellini@amd.com>
13---
14 hw/xen/xen-mapcache.c | 12 +++---------
15 1 file changed, 3 insertions(+), 9 deletions(-)
16
17diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
18index 0b75f1633a..e53e7221f1 100644
19--- a/hw/xen/xen-mapcache.c
20+++ b/hw/xen/xen-mapcache.c
21@@ -405,13 +405,8 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
22 }
23 }
24 if (!found) {
25- fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
26- QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
27- DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
28- reventry->vaddr_req);
29- }
30- abort();
31- return 0;
32+ mapcache_unlock();
33+ return RAM_ADDR_INVALID;
34 }
35
36 entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
37@@ -419,8 +414,7 @@ ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
38 entry = entry->next;
39 }
40 if (!entry) {
41- DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
42- raddr = 0;
43+ raddr = RAM_ADDR_INVALID;
44 } else {
45 raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
46 ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
47--
482.25.1
49
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch
new file mode 100644
index 00000000..db6d8fe5
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0005-memory-add-MemoryRegion-map-and-unmap-callbacks.patch
@@ -0,0 +1,150 @@
1From bd32a130ca633eae7cf0f4ff0fa856004d413df0 Mon Sep 17 00:00:00 2001
2From: Juergen Gross <jgross@suse.com>
3Date: Thu, 27 May 2021 15:27:55 +0200
4Subject: [PATCH 5/8] memory: add MemoryRegion map and unmap callbacks
5
6In order to support mapping and unmapping guest memory dynamically to
7and from qemu during address_space_[un]map() operations add the map()
8and unmap() callbacks to MemoryRegionOps.
9
10Those will be used e.g. for Xen grant mappings when performing guest
11I/Os.
12
13Signed-off-by: Juergen Gross <jgross@suse.com>
14Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
15Acked-by: Stefano Stabellini <stefano.stabellini@amd.com>
16---
17 include/exec/memory.h | 19 +++++++++++++++++
18 softmmu/physmem.c | 47 +++++++++++++++++++++++++++++++++----------
19 2 files changed, 55 insertions(+), 11 deletions(-)
20
21diff --git a/include/exec/memory.h b/include/exec/memory.h
22index bfb1de8eea..19e2aac694 100644
23--- a/include/exec/memory.h
24+++ b/include/exec/memory.h
25@@ -245,6 +245,25 @@ struct MemoryRegionOps {
26 unsigned size,
27 MemTxAttrs attrs);
28
29+ /* Dynamically create mapping. @addr is the guest address to map; @plen
30+ * is the pointer to the usable length of the buffer.
31+ * @mr contents can be changed in case a new memory region is created for
32+ * the mapping.
33+ * Returns the buffer address for accessing the data. */
34+ void *(*map)(MemoryRegion **mr,
35+ hwaddr addr,
36+ hwaddr *plen,
37+ bool is_write,
38+ MemTxAttrs attrs);
39+
40+ /* Unmap an area obtained via map() before. */
41+ void (*unmap)(MemoryRegion *mr,
42+ void *buffer,
43+ ram_addr_t addr,
44+ hwaddr len,
45+ bool is_write,
46+ hwaddr access_len);
47+
48 enum device_endian endianness;
49 /* Guest-visible constraints: */
50 struct {
51diff --git a/softmmu/physmem.c b/softmmu/physmem.c
52index 439a53a1be..2038240311 100644
53--- a/softmmu/physmem.c
54+++ b/softmmu/physmem.c
55@@ -3237,7 +3237,7 @@ void *address_space_map(AddressSpace *as,
56 hwaddr len = *plen;
57 hwaddr l, xlat;
58 MemoryRegion *mr;
59- void *ptr;
60+ void *ptr = NULL;
61 FlatView *fv;
62
63 if (len == 0) {
64@@ -3273,10 +3273,17 @@ void *address_space_map(AddressSpace *as,
65
66
67 memory_region_ref(mr);
68+
69+ if (mr->ops && mr->ops->map) {
70+ ptr = mr->ops->map(&mr, addr, plen, is_write, attrs);
71+ }
72+
73 *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
74 l, is_write, attrs);
75 fuzz_dma_read_cb(addr, *plen, mr);
76- ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
77+ if (ptr == NULL) {
78+ ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
79+ }
80
81 return ptr;
82 }
83@@ -3294,11 +3301,16 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
84
85 mr = memory_region_from_host(buffer, &addr1);
86 assert(mr != NULL);
87- if (is_write) {
88- invalidate_and_set_dirty(mr, addr1, access_len);
89- }
90- if (xen_enabled()) {
91- xen_invalidate_map_cache_entry(buffer);
92+
93+ if (mr->ops && mr->ops->unmap) {
94+ mr->ops->unmap(mr, buffer, addr1, len, is_write, access_len);
95+ } else {
96+ if (is_write) {
97+ invalidate_and_set_dirty(mr, addr1, access_len);
98+ }
99+ if (xen_enabled()) {
100+ xen_invalidate_map_cache_entry(buffer);
101+ }
102 }
103 memory_region_unref(mr);
104 return;
105@@ -3370,10 +3382,17 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
106 * doing this if we found actual RAM, which behaves the same
107 * regardless of attributes; so UNSPECIFIED is fine.
108 */
109+ if (mr->ops && mr->ops->map) {
110+ cache->ptr = mr->ops->map(&mr, addr, &l, is_write,
111+ MEMTXATTRS_UNSPECIFIED);
112+ }
113+
114 l = flatview_extend_translation(cache->fv, addr, len, mr,
115 cache->xlat, l, is_write,
116 MEMTXATTRS_UNSPECIFIED);
117- cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
118+ if (!cache->ptr) {
119+ cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
120+ }
121 } else {
122 cache->ptr = NULL;
123 }
124@@ -3395,14 +3414,20 @@ void address_space_cache_invalidate(MemoryRegionCache *cache,
125
126 void address_space_cache_destroy(MemoryRegionCache *cache)
127 {
128- if (!cache->mrs.mr) {
129+ MemoryRegion *mr = cache->mrs.mr;
130+
131+ if (!mr) {
132 return;
133 }
134
135- if (xen_enabled()) {
136+ if (mr->ops && mr->ops->unmap) {
137+ mr->ops->unmap(mr, cache->ptr, cache->xlat, cache->len,
138+ cache->is_write, cache->len);
139+ } else if (xen_enabled()) {
140 xen_invalidate_map_cache_entry(cache->ptr);
141 }
142- memory_region_unref(cache->mrs.mr);
143+
144+ memory_region_unref(mr);
145 flatview_unref(cache->fv);
146 cache->mrs.mr = NULL;
147 cache->fv = NULL;
148--
1492.25.1
150
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0005-xen-hvm-create-arch_handle_ioreq-and-arch_xen_set_me.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0005-xen-hvm-create-arch_handle_ioreq-and-arch_xen_set_me.patch
new file mode 100644
index 00000000..6b56a39e
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0005-xen-hvm-create-arch_handle_ioreq-and-arch_xen_set_me.patch
@@ -0,0 +1,192 @@
1From c38436434fc888ba8844d99eab451f9b734e5e5b Mon Sep 17 00:00:00 2001
2From: Stefano Stabellini <stefano.stabellini@amd.com>
3Date: Fri, 1 Jul 2022 19:34:39 -0700
4Subject: [PATCH 05/16] xen-hvm: create arch_handle_ioreq and
5 arch_xen_set_memory
6
7In preparation to moving most of xen-hvm code to an arch-neutral
8location, move the x86-specific portion of xen_set_memory to
9arch_xen_set_memory.
10
11Also move handle_vmport_ioreq to arch_handle_ioreq.
12
13Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
14Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
15Reviewed-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
16---
17 hw/i386/xen/xen-hvm.c | 98 ++++++++++++++++++++--------------
18 include/hw/i386/xen_arch_hvm.h | 10 ++++
19 include/hw/xen/arch_hvm.h | 3 ++
20 3 files changed, 71 insertions(+), 40 deletions(-)
21 create mode 100644 include/hw/i386/xen_arch_hvm.h
22 create mode 100644 include/hw/xen/arch_hvm.h
23
24diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
25index 225cfdf8b7..178f0c68fc 100644
26--- a/hw/i386/xen/xen-hvm.c
27+++ b/hw/i386/xen/xen-hvm.c
28@@ -134,6 +134,8 @@ typedef struct XenIOState {
29 Notifier exit;
30 } XenIOState;
31
32+#include "hw/xen/arch_hvm.h"
33+
34 /* Xen specific function for piix pci */
35
36 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
37@@ -476,10 +478,6 @@ static void xen_set_memory(struct MemoryListener *listener,
38 bool add)
39 {
40 XenIOState *state = container_of(listener, XenIOState, memory_listener);
41- hwaddr start_addr = section->offset_within_address_space;
42- ram_addr_t size = int128_get64(section->size);
43- bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
44- hvmmem_type_t mem_type;
45
46 if (section->mr == &ram_memory) {
47 return;
48@@ -492,38 +490,7 @@ static void xen_set_memory(struct MemoryListener *listener,
49 section);
50 }
51 }
52-
53- if (!memory_region_is_ram(section->mr)) {
54- return;
55- }
56-
57- if (log_dirty != add) {
58- return;
59- }
60-
61- trace_xen_client_set_memory(start_addr, size, log_dirty);
62-
63- start_addr &= TARGET_PAGE_MASK;
64- size = TARGET_PAGE_ALIGN(size);
65-
66- if (add) {
67- if (!memory_region_is_rom(section->mr)) {
68- xen_add_to_physmap(state, start_addr, size,
69- section->mr, section->offset_within_region);
70- } else {
71- mem_type = HVMMEM_ram_ro;
72- if (xen_set_mem_type(xen_domid, mem_type,
73- start_addr >> TARGET_PAGE_BITS,
74- size >> TARGET_PAGE_BITS)) {
75- DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n",
76- start_addr);
77- }
78- }
79- } else {
80- if (xen_remove_from_physmap(state, start_addr, size) < 0) {
81- DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
82- }
83- }
84+ arch_xen_set_memory(state, section, add);
85 }
86
87 static void xen_region_add(MemoryListener *listener,
88@@ -1051,9 +1018,6 @@ static void handle_ioreq(XenIOState *state, ioreq_t *req)
89 case IOREQ_TYPE_COPY:
90 cpu_ioreq_move(req);
91 break;
92- case IOREQ_TYPE_VMWARE_PORT:
93- handle_vmport_ioreq(state, req);
94- break;
95 case IOREQ_TYPE_TIMEOFFSET:
96 break;
97 case IOREQ_TYPE_INVALIDATE:
98@@ -1063,7 +1027,7 @@ static void handle_ioreq(XenIOState *state, ioreq_t *req)
99 cpu_ioreq_config(state, req);
100 break;
101 default:
102- hw_error("Invalid ioreq type 0x%x\n", req->type);
103+ arch_handle_ioreq(state, req);
104 }
105 if (req->dir == IOREQ_READ) {
106 trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
107@@ -1604,3 +1568,57 @@ void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
108 memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
109 }
110 }
111+
112+void arch_xen_set_memory(XenIOState *state,MemoryRegionSection *section,
113+ bool add)
114+{
115+ hwaddr start_addr = section->offset_within_address_space;
116+ ram_addr_t size = int128_get64(section->size);
117+ bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
118+ hvmmem_type_t mem_type;
119+
120+ if (!memory_region_is_ram(section->mr)) {
121+ return;
122+ }
123+
124+ if (log_dirty != add) {
125+ return;
126+ }
127+
128+ trace_xen_client_set_memory(start_addr, size, log_dirty);
129+
130+ start_addr &= TARGET_PAGE_MASK;
131+ size = TARGET_PAGE_ALIGN(size);
132+
133+ if (add) {
134+ if (!memory_region_is_rom(section->mr)) {
135+ xen_add_to_physmap(state, start_addr, size,
136+ section->mr, section->offset_within_region);
137+ } else {
138+ mem_type = HVMMEM_ram_ro;
139+ if (xen_set_mem_type(xen_domid, mem_type,
140+ start_addr >> TARGET_PAGE_BITS,
141+ size >> TARGET_PAGE_BITS)) {
142+ DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n",
143+ start_addr);
144+ }
145+ }
146+ } else {
147+ if (xen_remove_from_physmap(state, start_addr, size) < 0) {
148+ DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
149+ }
150+ }
151+}
152+
153+void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
154+{
155+ switch (req->type) {
156+ case IOREQ_TYPE_VMWARE_PORT:
157+ handle_vmport_ioreq(state, req);
158+ break;
159+ default:
160+ hw_error("Invalid ioreq type 0x%x\n", req->type);
161+ }
162+
163+ return;
164+}
165diff --git a/include/hw/i386/xen_arch_hvm.h b/include/hw/i386/xen_arch_hvm.h
166new file mode 100644
167index 0000000000..1b2c71ba4f
168--- /dev/null
169+++ b/include/hw/i386/xen_arch_hvm.h
170@@ -0,0 +1,10 @@
171+#ifndef HW_XEN_ARCH_I386_HVM_H
172+#define HW_XEN_ARCH_I386_HVM_H
173+
174+#include <xen/hvm/ioreq.h>
175+
176+void arch_handle_ioreq(XenIOState *state, ioreq_t *req);
177+void arch_xen_set_memory(XenIOState *state,
178+ MemoryRegionSection *section,
179+ bool add);
180+#endif
181diff --git a/include/hw/xen/arch_hvm.h b/include/hw/xen/arch_hvm.h
182new file mode 100644
183index 0000000000..26674648d8
184--- /dev/null
185+++ b/include/hw/xen/arch_hvm.h
186@@ -0,0 +1,3 @@
187+#if defined(TARGET_I386) || defined(TARGET_X86_64)
188+#include "hw/i386/xen_arch_hvm.h"
189+#endif
190--
1912.17.1
192
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0006-xen-add-map-and-unmap-callbacks-for-grant-region.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0006-xen-add-map-and-unmap-callbacks-for-grant-region.patch
new file mode 100644
index 00000000..87bbc3c6
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0006-xen-add-map-and-unmap-callbacks-for-grant-region.patch
@@ -0,0 +1,255 @@
1From ef94d70d4a22c5282d6955a7ed066ef502e99829 Mon Sep 17 00:00:00 2001
2From: Juergen Gross <jgross@suse.com>
3Date: Fri, 26 Aug 2022 13:57:06 +0200
4Subject: [PATCH 6/8] xen: add map and unmap callbacks for grant region
5
6Add the callbacks for mapping/unmapping guest memory via grants to the
7special grant memory region.
8
9Signed-off-by: Juergen Gross <jgross@suse.com>
10Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
11Signed-off-by: Stefano Stabellini <stefano.stabellini@amd.com>
12Acked-by: Stefano Stabellini <stefano.stabellini@amd.com>
13---
14 hw/xen/xen-mapcache.c | 167 +++++++++++++++++++++++++++++++++++++++++-
15 softmmu/physmem.c | 11 ++-
16 2 files changed, 173 insertions(+), 5 deletions(-)
17
18diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
19index e53e7221f1..f81b75d216 100644
20--- a/hw/xen/xen-mapcache.c
21+++ b/hw/xen/xen-mapcache.c
22@@ -9,6 +9,8 @@
23 */
24
25 #include "qemu/osdep.h"
26+#include "qemu/queue.h"
27+#include "qemu/thread.h"
28 #include "qemu/units.h"
29 #include "qemu/error-report.h"
30
31@@ -24,6 +26,8 @@
32 #include "sysemu/xen-mapcache.h"
33 #include "trace.h"
34
35+#include <xenevtchn.h>
36+#include <xengnttab.h>
37
38 //#define MAPCACHE_DEBUG
39
40@@ -386,7 +390,7 @@ uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
41 return p;
42 }
43
44-ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
45+static ram_addr_t xen_ram_addr_from_mapcache_try(void *ptr)
46 {
47 MapCacheEntry *entry = NULL;
48 MapCacheRev *reventry;
49@@ -595,10 +599,170 @@ uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
50 return p;
51 }
52
53+struct XENMappedGrantRegion {
54+ void *addr;
55+ unsigned int pages;
56+ unsigned int refs;
57+ unsigned int prot;
58+ uint32_t idx;
59+ QLIST_ENTRY(XENMappedGrantRegion) list;
60+};
61+
62+static xengnttab_handle *xen_region_gnttabdev;
63+static QLIST_HEAD(GrantRegionList, XENMappedGrantRegion) xen_grant_mappings =
64+ QLIST_HEAD_INITIALIZER(xen_grant_mappings);
65+static QemuMutex xen_map_mutex;
66+
67+static void *xen_map_grant_dyn(MemoryRegion **mr, hwaddr addr, hwaddr *plen,
68+ bool is_write, MemTxAttrs attrs)
69+{
70+ unsigned int page_off = addr & (XC_PAGE_SIZE - 1);
71+ unsigned int i;
72+ unsigned int nrefs = (page_off + *plen + XC_PAGE_SIZE - 1) >> XC_PAGE_SHIFT;
73+ uint32_t ref = (addr - XEN_GRANT_ADDR_OFF) >> XC_PAGE_SHIFT;
74+ uint32_t *refs;
75+ unsigned int prot = PROT_READ;
76+ struct XENMappedGrantRegion *mgr = NULL;
77+
78+ if (is_write) {
79+ prot |= PROT_WRITE;
80+ }
81+
82+ qemu_mutex_lock(&xen_map_mutex);
83+
84+ QLIST_FOREACH(mgr, &xen_grant_mappings, list) {
85+ if (mgr->idx == ref &&
86+ mgr->pages == nrefs &&
87+ (mgr->prot & prot) == prot) {
88+ break;
89+ }
90+ }
91+ if (!mgr) {
92+ mgr = g_new(struct XENMappedGrantRegion, 1);
93+
94+ if (nrefs == 1) {
95+ refs = &ref;
96+ } else {
97+ refs = g_new(uint32_t, nrefs);
98+ for (i = 0; i < nrefs; i++) {
99+ refs[i] = ref + i;
100+ }
101+ }
102+ mgr->addr = xengnttab_map_domain_grant_refs(xen_region_gnttabdev, nrefs,
103+ xen_domid, refs, prot);
104+ if (mgr->addr) {
105+ mgr->pages = nrefs;
106+ mgr->refs = 1;
107+ mgr->prot = prot;
108+ mgr->idx = ref;
109+
110+ QLIST_INSERT_HEAD(&xen_grant_mappings, mgr, list);
111+ } else {
112+ g_free(mgr);
113+ mgr = NULL;
114+ }
115+ } else {
116+ mgr->refs++;
117+ }
118+
119+ qemu_mutex_unlock(&xen_map_mutex);
120+
121+ if (nrefs > 1) {
122+ g_free(refs);
123+ }
124+
125+ return mgr ? mgr->addr + page_off : NULL;
126+}
127+
128+static void xen_unmap_grant_dyn(MemoryRegion *mr, void *buffer, ram_addr_t addr,
129+ hwaddr len, bool is_write, hwaddr access_len)
130+{
131+ unsigned int page_off = (unsigned long)buffer & (XC_PAGE_SIZE - 1);
132+ unsigned int nrefs = (page_off + len + XC_PAGE_SIZE - 1) >> XC_PAGE_SHIFT;
133+ unsigned int prot = PROT_READ;
134+ struct XENMappedGrantRegion *mgr = NULL;
135+
136+ if (is_write) {
137+ prot |= PROT_WRITE;
138+ }
139+
140+ qemu_mutex_lock(&xen_map_mutex);
141+
142+ QLIST_FOREACH(mgr, &xen_grant_mappings, list) {
143+ if (mgr->addr == buffer - page_off &&
144+ mgr->pages == nrefs &&
145+ (mgr->prot & prot) == prot) {
146+ break;
147+ }
148+ }
149+ if (mgr) {
150+ mgr->refs--;
151+ if (!mgr->refs) {
152+ xengnttab_unmap(xen_region_gnttabdev, mgr->addr, nrefs);
153+
154+ QLIST_REMOVE(mgr, list);
155+ g_free(mgr);
156+ }
157+ } else {
158+ error_report("xen_unmap_grant_dyn() trying to unmap unknown buffer");
159+ }
160+
161+ qemu_mutex_unlock(&xen_map_mutex);
162+}
163+
164+static ram_addr_t xen_ram_addr_from_grant_cache(void *ptr)
165+{
166+ unsigned int page_off = (unsigned long)ptr & (XC_PAGE_SIZE - 1);
167+ struct XENMappedGrantRegion *mgr = NULL;
168+ ram_addr_t raddr = RAM_ADDR_INVALID;
169+
170+ qemu_mutex_lock(&xen_map_mutex);
171+
172+ QLIST_FOREACH(mgr, &xen_grant_mappings, list) {
173+ if (mgr->addr == ptr - page_off) {
174+ break;
175+ }
176+ }
177+
178+ if (mgr) {
179+ raddr = (mgr->idx << XC_PAGE_SHIFT) + page_off + XEN_GRANT_ADDR_OFF;
180+ }
181+
182+ qemu_mutex_unlock(&xen_map_mutex);
183+
184+ return raddr;
185+}
186+
187+ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
188+{
189+ ram_addr_t raddr;
190+
191+ raddr = xen_ram_addr_from_mapcache_try(ptr);
192+ if (raddr == RAM_ADDR_INVALID) {
193+ raddr = xen_ram_addr_from_grant_cache(ptr);
194+ }
195+
196+ return raddr;
197+}
198+
199+static const struct MemoryRegionOps xen_grant_mr_ops = {
200+ .map = xen_map_grant_dyn,
201+ .unmap = xen_unmap_grant_dyn,
202+ .endianness = DEVICE_LITTLE_ENDIAN,
203+};
204+
205 MemoryRegion *xen_init_grant_ram(void)
206 {
207 RAMBlock *block;
208
209+ qemu_mutex_init(&xen_map_mutex);
210+
211+ xen_region_gnttabdev = xengnttab_open(NULL, 0);
212+ if (xen_region_gnttabdev == NULL) {
213+ fprintf(stderr, "can't open gnttab device\n");
214+ return NULL;
215+ }
216+
217 memory_region_init(&ram_grants, NULL, "xen.grants",
218 XEN_MAX_VIRTIO_GRANTS * XC_PAGE_SIZE);
219 block = g_malloc0(sizeof(*block));
220@@ -613,6 +777,7 @@ MemoryRegion *xen_init_grant_ram(void)
221 ram_grants.ram_block = block;
222 ram_grants.ram = true;
223 ram_grants.terminates = true;
224+ ram_grants.ops = &xen_grant_mr_ops;
225 ram_block_add_list(block);
226 memory_region_add_subregion(get_system_memory(), XEN_GRANT_ADDR_OFF,
227 &ram_grants);
228diff --git a/softmmu/physmem.c b/softmmu/physmem.c
229index 2038240311..6b2a02fc87 100644
230--- a/softmmu/physmem.c
231+++ b/softmmu/physmem.c
232@@ -2391,13 +2391,16 @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
233
234 if (xen_enabled()) {
235 ram_addr_t ram_addr;
236+
237 RCU_READ_LOCK_GUARD();
238 ram_addr = xen_ram_addr_from_mapcache(ptr);
239- block = qemu_get_ram_block(ram_addr);
240- if (block) {
241- *offset = ram_addr - block->offset;
242+ if (ram_addr != RAM_ADDR_INVALID) {
243+ block = qemu_get_ram_block(ram_addr);
244+ if (block) {
245+ *offset = ram_addr - block->offset;
246+ }
247+ return block;
248 }
249- return block;
250 }
251
252 RCU_READ_LOCK_GUARD();
253--
2542.25.1
255
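The callbacks in the patch above keep a reference-counted list of XENMappedGrantRegion entries: xen_map_grant_dyn() reuses an existing mapping when the first grant ref, page count and protection match, otherwise it maps the refs with xengnttab_map_domain_grant_refs() and inserts a new entry, and xen_unmap_grant_dyn() only tears the mapping down once the last reference is dropped. The standalone sketch below models just that bookkeeping, with a stubbed backend and no locking; the names, the fake page size and the malloc-based "mapping" are illustrative stand-ins, not the QEMU or libxengnttab API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u                  /* stand-in for XC_PAGE_SIZE */

struct grant_region {                    /* models XENMappedGrantRegion */
    void *addr;
    unsigned int pages, refs, prot;
    uint32_t idx;                        /* first grant reference */
    struct grant_region *next;
};

static struct grant_region *regions;

/* Stubbed backend: the real code calls xengnttab_map_domain_grant_refs(). */
static void *backend_map(uint32_t idx, unsigned int pages)
{
    (void)idx;
    return calloc(pages, PAGE_SIZE);
}

static void *grant_map(uint32_t idx, unsigned int pages, unsigned int prot)
{
    struct grant_region *r;

    for (r = regions; r; r = r->next) {
        if (r->idx == idx && r->pages == pages && (r->prot & prot) == prot) {
            r->refs++;                   /* cache hit: just take a reference */
            return r->addr;
        }
    }
    r = malloc(sizeof(*r));
    r->addr = backend_map(idx, pages);
    r->idx = idx;
    r->pages = pages;
    r->prot = prot;
    r->refs = 1;
    r->next = regions;
    regions = r;
    return r->addr;
}

static void grant_unmap(void *addr)
{
    struct grant_region **pr, *r;

    for (pr = &regions; (r = *pr) != NULL; pr = &r->next) {
        if (r->addr == addr) {
            if (--r->refs == 0) {        /* last user: drop the mapping */
                *pr = r->next;
                free(r->addr);           /* real code calls xengnttab_unmap() */
                free(r);
            }
            return;
        }
    }
    fprintf(stderr, "trying to unmap unknown buffer %p\n", addr);
}

int main(void)
{
    void *a = grant_map(8, 2, 1);
    void *b = grant_map(8, 2, 1);        /* same region: refcount goes to 2 */
    printf("shared mapping: %d\n", a == b);
    grant_unmap(b);
    grant_unmap(a);                      /* second unmap actually releases it */
    return 0;
}

Keying the lookup on the full (first ref, page count, protection) tuple is what lets repeated maps of the same grant range share one host mapping instead of accumulating duplicates, which is the point of the cache in xen-mapcache.c.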
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch
new file mode 100644
index 00000000..7df302a2
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0006-xen-hvm-move-common-functions-to-hw-xen-xen-hvm-comm.patch
@@ -0,0 +1,2094 @@
1From 87d362e72e65b604da7554657204344a6540d88c Mon Sep 17 00:00:00 2001
2From: Vikram Garhwal <vikram.garhwal@amd.com>
3Date: Fri, 1 Jul 2022 15:59:47 -0700
4Subject: [PATCH 06/16] xen-hvm: move common functions to
5 hw/xen/xen-hvm-common.c
6
7Extract common functionalities from xen-hvm.c and move them to
8hw/xen/xen-hvm-common.c. These common functions are useful for creating
9an IOREQ server.
10
11Moved the common usable IOREQ creation part to a new function
12xen_register_ioreq() which can be used by both x86 and ARM machines.
13
14NOTE: This patch will break the build as the patch only involves moving
15of functions. Build fixes will be in the next patch.
16
17Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
18Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
19Reviewed-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
20---
21 hw/i386/xen/trace-events | 14 -
22 hw/i386/xen/xen-hvm.c | 927 +-------------------------------
23 hw/xen/meson.build | 5 +-
24 hw/xen/trace-events | 14 +
25 hw/xen/xen-hvm-common.c | 861 +++++++++++++++++++++++++++++
26 include/hw/i386/xen_arch_hvm.h | 1 +
27 include/hw/xen/xen-hvm-common.h | 98 ++++
28 7 files changed, 986 insertions(+), 934 deletions(-)
29 create mode 100644 hw/xen/xen-hvm-common.c
30 create mode 100644 include/hw/xen/xen-hvm-common.h
31
32diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events
33index a0c89d91c4..5d0a8d6dcf 100644
34--- a/hw/i386/xen/trace-events
35+++ b/hw/i386/xen/trace-events
36@@ -7,17 +7,3 @@ xen_platform_log(char *s) "xen platform: %s"
37 xen_pv_mmio_read(uint64_t addr) "WARNING: read from Xen PV Device MMIO space (address 0x%"PRIx64")"
38 xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (address 0x%"PRIx64")"
39
40-# xen-hvm.c
41-xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: 0x%lx, size 0x%lx"
42-xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "0x%"PRIx64" size 0x%lx, log_dirty %i"
43-handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
44-handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
45-handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
46-cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
47-cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
48-cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
49-cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
50-xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p"
51-cpu_ioreq_config_read(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
52-cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
53-
54diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
55index 178f0c68fc..36d87555a9 100644
56--- a/hw/i386/xen/xen-hvm.c
57+++ b/hw/i386/xen/xen-hvm.c
58@@ -10,43 +10,21 @@
59
60 #include "qemu/osdep.h"
61 #include "qemu/units.h"
62+#include "qapi/error.h"
63+#include "qapi/qapi-commands-migration.h"
64+#include "trace.h"
65
66-#include "cpu.h"
67-#include "hw/pci/pci.h"
68-#include "hw/pci/pci_host.h"
69 #include "hw/i386/pc.h"
70 #include "hw/irq.h"
71-#include "hw/hw.h"
72 #include "hw/i386/apic-msidef.h"
73-#include "hw/xen/xen_common.h"
74-#include "hw/xen/xen-legacy-backend.h"
75-#include "hw/xen/xen-bus.h"
76 #include "hw/xen/xen-x86.h"
77-#include "qapi/error.h"
78-#include "qapi/qapi-commands-migration.h"
79-#include "qemu/error-report.h"
80-#include "qemu/main-loop.h"
81 #include "qemu/range.h"
82-#include "sysemu/runstate.h"
83-#include "sysemu/sysemu.h"
84-#include "sysemu/xen.h"
85-#include "sysemu/xen-mapcache.h"
86-#include "trace.h"
87
88-#include <xen/hvm/ioreq.h>
89+#include "hw/xen/xen-hvm-common.h"
90+#include "hw/xen/arch_hvm.h"
91 #include <xen/hvm/e820.h>
92
93-//#define DEBUG_XEN_HVM
94-
95-#ifdef DEBUG_XEN_HVM
96-#define DPRINTF(fmt, ...) \
97- do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
98-#else
99-#define DPRINTF(fmt, ...) \
100- do { } while (0)
101-#endif
102-
103-static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
104+static MemoryRegion ram_640k, ram_lo, ram_hi;
105 static MemoryRegion *framebuffer;
106 static bool xen_in_migration;
107
108@@ -75,25 +53,6 @@ typedef struct shared_vmport_iopage shared_vmport_iopage_t;
109 #endif
110 static shared_vmport_iopage_t *shared_vmport_page;
111
112-static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
113-{
114- return shared_page->vcpu_ioreq[i].vp_eport;
115-}
116-static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
117-{
118- return &shared_page->vcpu_ioreq[vcpu];
119-}
120-
121-#define BUFFER_IO_MAX_DELAY 100
122-
123-typedef struct XenPhysmap {
124- hwaddr start_addr;
125- ram_addr_t size;
126- const char *name;
127- hwaddr phys_offset;
128-
129- QLIST_ENTRY(XenPhysmap) list;
130-} XenPhysmap;
131
132 static QLIST_HEAD(, XenPhysmap) xen_physmap;
133 static const XenPhysmap *log_for_dirtybit = NULL;
134@@ -102,40 +61,6 @@ static unsigned long *dirty_bitmap = NULL;
135 static Notifier suspend;
136 static Notifier wakeup;
137
138-typedef struct XenPciDevice {
139- PCIDevice *pci_dev;
140- uint32_t sbdf;
141- QLIST_ENTRY(XenPciDevice) entry;
142-} XenPciDevice;
143-
144-typedef struct XenIOState {
145- ioservid_t ioservid;
146- shared_iopage_t *shared_page;
147- buffered_iopage_t *buffered_io_page;
148- xenforeignmemory_resource_handle *fres;
149- QEMUTimer *buffered_io_timer;
150- CPUState **cpu_by_vcpu_id;
151- /* the evtchn port for polling the notification, */
152- evtchn_port_t *ioreq_local_port;
153- /* evtchn remote and local ports for buffered io */
154- evtchn_port_t bufioreq_remote_port;
155- evtchn_port_t bufioreq_local_port;
156- /* the evtchn fd for polling */
157- xenevtchn_handle *xce_handle;
158- /* which vcpu we are serving */
159- int send_vcpu;
160-
161- struct xs_handle *xenstore;
162- MemoryListener memory_listener;
163- MemoryListener io_listener;
164- QLIST_HEAD(, XenPciDevice) dev_list;
165- DeviceListener device_listener;
166-
167- Notifier exit;
168-} XenIOState;
169-
170-#include "hw/xen/arch_hvm.h"
171-
172 /* Xen specific function for piix pci */
173
174 int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
175@@ -248,42 +173,6 @@ static void xen_ram_init(PCMachineState *pcms,
176 }
177 }
178
179-void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
180- Error **errp)
181-{
182- unsigned long nr_pfn;
183- xen_pfn_t *pfn_list;
184- int i;
185-
186- if (runstate_check(RUN_STATE_INMIGRATE)) {
187- /* RAM already populated in Xen */
188- fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
189- " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
190- __func__, size, ram_addr);
191- return;
192- }
193-
194- if (mr == &ram_memory) {
195- return;
196- }
197-
198- trace_xen_ram_alloc(ram_addr, size);
199-
200- nr_pfn = size >> TARGET_PAGE_BITS;
201- pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);
202-
203- for (i = 0; i < nr_pfn; i++) {
204- pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
205- }
206-
207- if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
208- error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
209- ram_addr);
210- }
211-
212- g_free(pfn_list);
213-}
214-
215 static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
216 {
217 XenPhysmap *physmap = NULL;
218@@ -473,109 +362,6 @@ static int xen_remove_from_physmap(XenIOState *state,
219 return 0;
220 }
221
222-static void xen_set_memory(struct MemoryListener *listener,
223- MemoryRegionSection *section,
224- bool add)
225-{
226- XenIOState *state = container_of(listener, XenIOState, memory_listener);
227-
228- if (section->mr == &ram_memory) {
229- return;
230- } else {
231- if (add) {
232- xen_map_memory_section(xen_domid, state->ioservid,
233- section);
234- } else {
235- xen_unmap_memory_section(xen_domid, state->ioservid,
236- section);
237- }
238- }
239- arch_xen_set_memory(state, section, add);
240-}
241-
242-static void xen_region_add(MemoryListener *listener,
243- MemoryRegionSection *section)
244-{
245- memory_region_ref(section->mr);
246- xen_set_memory(listener, section, true);
247-}
248-
249-static void xen_region_del(MemoryListener *listener,
250- MemoryRegionSection *section)
251-{
252- xen_set_memory(listener, section, false);
253- memory_region_unref(section->mr);
254-}
255-
256-static void xen_io_add(MemoryListener *listener,
257- MemoryRegionSection *section)
258-{
259- XenIOState *state = container_of(listener, XenIOState, io_listener);
260- MemoryRegion *mr = section->mr;
261-
262- if (mr->ops == &unassigned_io_ops) {
263- return;
264- }
265-
266- memory_region_ref(mr);
267-
268- xen_map_io_section(xen_domid, state->ioservid, section);
269-}
270-
271-static void xen_io_del(MemoryListener *listener,
272- MemoryRegionSection *section)
273-{
274- XenIOState *state = container_of(listener, XenIOState, io_listener);
275- MemoryRegion *mr = section->mr;
276-
277- if (mr->ops == &unassigned_io_ops) {
278- return;
279- }
280-
281- xen_unmap_io_section(xen_domid, state->ioservid, section);
282-
283- memory_region_unref(mr);
284-}
285-
286-static void xen_device_realize(DeviceListener *listener,
287- DeviceState *dev)
288-{
289- XenIOState *state = container_of(listener, XenIOState, device_listener);
290-
291- if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
292- PCIDevice *pci_dev = PCI_DEVICE(dev);
293- XenPciDevice *xendev = g_new(XenPciDevice, 1);
294-
295- xendev->pci_dev = pci_dev;
296- xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
297- pci_dev->devfn);
298- QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);
299-
300- xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
301- }
302-}
303-
304-static void xen_device_unrealize(DeviceListener *listener,
305- DeviceState *dev)
306-{
307- XenIOState *state = container_of(listener, XenIOState, device_listener);
308-
309- if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
310- PCIDevice *pci_dev = PCI_DEVICE(dev);
311- XenPciDevice *xendev, *next;
312-
313- xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);
314-
315- QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
316- if (xendev->pci_dev == pci_dev) {
317- QLIST_REMOVE(xendev, entry);
318- g_free(xendev);
319- break;
320- }
321- }
322- }
323-}
324-
325 static void xen_sync_dirty_bitmap(XenIOState *state,
326 hwaddr start_addr,
327 ram_addr_t size)
328@@ -683,277 +469,6 @@ static MemoryListener xen_memory_listener = {
329 .priority = 10,
330 };
331
332-static MemoryListener xen_io_listener = {
333- .name = "xen-io",
334- .region_add = xen_io_add,
335- .region_del = xen_io_del,
336- .priority = 10,
337-};
338-
339-static DeviceListener xen_device_listener = {
340- .realize = xen_device_realize,
341- .unrealize = xen_device_unrealize,
342-};
343-
344-/* get the ioreq packets from share mem */
345-static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
346-{
347- ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);
348-
349- if (req->state != STATE_IOREQ_READY) {
350- DPRINTF("I/O request not ready: "
351- "%x, ptr: %x, port: %"PRIx64", "
352- "data: %"PRIx64", count: %u, size: %u\n",
353- req->state, req->data_is_ptr, req->addr,
354- req->data, req->count, req->size);
355- return NULL;
356- }
357-
358- xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
359-
360- req->state = STATE_IOREQ_INPROCESS;
361- return req;
362-}
363-
364-/* use poll to get the port notification */
365-/* ioreq_vec--out,the */
366-/* retval--the number of ioreq packet */
367-static ioreq_t *cpu_get_ioreq(XenIOState *state)
368-{
369- MachineState *ms = MACHINE(qdev_get_machine());
370- unsigned int max_cpus = ms->smp.max_cpus;
371- int i;
372- evtchn_port_t port;
373-
374- port = xenevtchn_pending(state->xce_handle);
375- if (port == state->bufioreq_local_port) {
376- timer_mod(state->buffered_io_timer,
377- BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
378- return NULL;
379- }
380-
381- if (port != -1) {
382- for (i = 0; i < max_cpus; i++) {
383- if (state->ioreq_local_port[i] == port) {
384- break;
385- }
386- }
387-
388- if (i == max_cpus) {
389- hw_error("Fatal error while trying to get io event!\n");
390- }
391-
392- /* unmask the wanted port again */
393- xenevtchn_unmask(state->xce_handle, port);
394-
395- /* get the io packet from shared memory */
396- state->send_vcpu = i;
397- return cpu_get_ioreq_from_shared_memory(state, i);
398- }
399-
400- /* read error or read nothing */
401- return NULL;
402-}
403-
404-static uint32_t do_inp(uint32_t addr, unsigned long size)
405-{
406- switch (size) {
407- case 1:
408- return cpu_inb(addr);
409- case 2:
410- return cpu_inw(addr);
411- case 4:
412- return cpu_inl(addr);
413- default:
414- hw_error("inp: bad size: %04x %lx", addr, size);
415- }
416-}
417-
418-static void do_outp(uint32_t addr,
419- unsigned long size, uint32_t val)
420-{
421- switch (size) {
422- case 1:
423- return cpu_outb(addr, val);
424- case 2:
425- return cpu_outw(addr, val);
426- case 4:
427- return cpu_outl(addr, val);
428- default:
429- hw_error("outp: bad size: %04x %lx", addr, size);
430- }
431-}
432-
433-/*
434- * Helper functions which read/write an object from/to physical guest
435- * memory, as part of the implementation of an ioreq.
436- *
437- * Equivalent to
438- * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
439- * val, req->size, 0/1)
440- * except without the integer overflow problems.
441- */
442-static void rw_phys_req_item(hwaddr addr,
443- ioreq_t *req, uint32_t i, void *val, int rw)
444-{
445- /* Do everything unsigned so overflow just results in a truncated result
446- * and accesses to undesired parts of guest memory, which is up
447- * to the guest */
448- hwaddr offset = (hwaddr)req->size * i;
449- if (req->df) {
450- addr -= offset;
451- } else {
452- addr += offset;
453- }
454- cpu_physical_memory_rw(addr, val, req->size, rw);
455-}
456-
457-static inline void read_phys_req_item(hwaddr addr,
458- ioreq_t *req, uint32_t i, void *val)
459-{
460- rw_phys_req_item(addr, req, i, val, 0);
461-}
462-static inline void write_phys_req_item(hwaddr addr,
463- ioreq_t *req, uint32_t i, void *val)
464-{
465- rw_phys_req_item(addr, req, i, val, 1);
466-}
467-
468-
469-static void cpu_ioreq_pio(ioreq_t *req)
470-{
471- uint32_t i;
472-
473- trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
474- req->data, req->count, req->size);
475-
476- if (req->size > sizeof(uint32_t)) {
477- hw_error("PIO: bad size (%u)", req->size);
478- }
479-
480- if (req->dir == IOREQ_READ) {
481- if (!req->data_is_ptr) {
482- req->data = do_inp(req->addr, req->size);
483- trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
484- req->size);
485- } else {
486- uint32_t tmp;
487-
488- for (i = 0; i < req->count; i++) {
489- tmp = do_inp(req->addr, req->size);
490- write_phys_req_item(req->data, req, i, &tmp);
491- }
492- }
493- } else if (req->dir == IOREQ_WRITE) {
494- if (!req->data_is_ptr) {
495- trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
496- req->size);
497- do_outp(req->addr, req->size, req->data);
498- } else {
499- for (i = 0; i < req->count; i++) {
500- uint32_t tmp = 0;
501-
502- read_phys_req_item(req->data, req, i, &tmp);
503- do_outp(req->addr, req->size, tmp);
504- }
505- }
506- }
507-}
508-
509-static void cpu_ioreq_move(ioreq_t *req)
510-{
511- uint32_t i;
512-
513- trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
514- req->data, req->count, req->size);
515-
516- if (req->size > sizeof(req->data)) {
517- hw_error("MMIO: bad size (%u)", req->size);
518- }
519-
520- if (!req->data_is_ptr) {
521- if (req->dir == IOREQ_READ) {
522- for (i = 0; i < req->count; i++) {
523- read_phys_req_item(req->addr, req, i, &req->data);
524- }
525- } else if (req->dir == IOREQ_WRITE) {
526- for (i = 0; i < req->count; i++) {
527- write_phys_req_item(req->addr, req, i, &req->data);
528- }
529- }
530- } else {
531- uint64_t tmp;
532-
533- if (req->dir == IOREQ_READ) {
534- for (i = 0; i < req->count; i++) {
535- read_phys_req_item(req->addr, req, i, &tmp);
536- write_phys_req_item(req->data, req, i, &tmp);
537- }
538- } else if (req->dir == IOREQ_WRITE) {
539- for (i = 0; i < req->count; i++) {
540- read_phys_req_item(req->data, req, i, &tmp);
541- write_phys_req_item(req->addr, req, i, &tmp);
542- }
543- }
544- }
545-}
546-
547-static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
548-{
549- uint32_t sbdf = req->addr >> 32;
550- uint32_t reg = req->addr;
551- XenPciDevice *xendev;
552-
553- if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
554- req->size != sizeof(uint32_t)) {
555- hw_error("PCI config access: bad size (%u)", req->size);
556- }
557-
558- if (req->count != 1) {
559- hw_error("PCI config access: bad count (%u)", req->count);
560- }
561-
562- QLIST_FOREACH(xendev, &state->dev_list, entry) {
563- if (xendev->sbdf != sbdf) {
564- continue;
565- }
566-
567- if (!req->data_is_ptr) {
568- if (req->dir == IOREQ_READ) {
569- req->data = pci_host_config_read_common(
570- xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
571- req->size);
572- trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
573- req->size, req->data);
574- } else if (req->dir == IOREQ_WRITE) {
575- trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
576- req->size, req->data);
577- pci_host_config_write_common(
578- xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
579- req->data, req->size);
580- }
581- } else {
582- uint32_t tmp;
583-
584- if (req->dir == IOREQ_READ) {
585- tmp = pci_host_config_read_common(
586- xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
587- req->size);
588- trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
589- req->size, tmp);
590- write_phys_req_item(req->data, req, 0, &tmp);
591- } else if (req->dir == IOREQ_WRITE) {
592- read_phys_req_item(req->data, req, 0, &tmp);
593- trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
594- req->size, tmp);
595- pci_host_config_write_common(
596- xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
597- tmp, req->size);
598- }
599- }
600- }
601-}
602-
603 static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
604 {
605 X86CPU *cpu;
606@@ -997,223 +512,6 @@ static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
607 current_cpu = NULL;
608 }
609
610-static void handle_ioreq(XenIOState *state, ioreq_t *req)
611-{
612- trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
613- req->addr, req->data, req->count, req->size);
614-
615- if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
616- (req->size < sizeof (target_ulong))) {
617- req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
618- }
619-
620- if (req->dir == IOREQ_WRITE)
621- trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
622- req->addr, req->data, req->count, req->size);
623-
624- switch (req->type) {
625- case IOREQ_TYPE_PIO:
626- cpu_ioreq_pio(req);
627- break;
628- case IOREQ_TYPE_COPY:
629- cpu_ioreq_move(req);
630- break;
631- case IOREQ_TYPE_TIMEOFFSET:
632- break;
633- case IOREQ_TYPE_INVALIDATE:
634- xen_invalidate_map_cache();
635- break;
636- case IOREQ_TYPE_PCI_CONFIG:
637- cpu_ioreq_config(state, req);
638- break;
639- default:
640- arch_handle_ioreq(state, req);
641- }
642- if (req->dir == IOREQ_READ) {
643- trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
644- req->addr, req->data, req->count, req->size);
645- }
646-}
647-
648-static bool handle_buffered_iopage(XenIOState *state)
649-{
650- buffered_iopage_t *buf_page = state->buffered_io_page;
651- buf_ioreq_t *buf_req = NULL;
652- bool handled_ioreq = false;
653- ioreq_t req;
654- int qw;
655-
656- if (!buf_page) {
657- return 0;
658- }
659-
660- memset(&req, 0x00, sizeof(req));
661- req.state = STATE_IOREQ_READY;
662- req.count = 1;
663- req.dir = IOREQ_WRITE;
664-
665- for (;;) {
666- uint32_t rdptr = buf_page->read_pointer, wrptr;
667-
668- xen_rmb();
669- wrptr = buf_page->write_pointer;
670- xen_rmb();
671- if (rdptr != buf_page->read_pointer) {
672- continue;
673- }
674- if (rdptr == wrptr) {
675- break;
676- }
677- buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
678- req.size = 1U << buf_req->size;
679- req.addr = buf_req->addr;
680- req.data = buf_req->data;
681- req.type = buf_req->type;
682- xen_rmb();
683- qw = (req.size == 8);
684- if (qw) {
685- if (rdptr + 1 == wrptr) {
686- hw_error("Incomplete quad word buffered ioreq");
687- }
688- buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
689- IOREQ_BUFFER_SLOT_NUM];
690- req.data |= ((uint64_t)buf_req->data) << 32;
691- xen_rmb();
692- }
693-
694- handle_ioreq(state, &req);
695-
696- /* Only req.data may get updated by handle_ioreq(), albeit even that
697- * should not happen as such data would never make it to the guest (we
698- * can only usefully see writes here after all).
699- */
700- assert(req.state == STATE_IOREQ_READY);
701- assert(req.count == 1);
702- assert(req.dir == IOREQ_WRITE);
703- assert(!req.data_is_ptr);
704-
705- qatomic_add(&buf_page->read_pointer, qw + 1);
706- handled_ioreq = true;
707- }
708-
709- return handled_ioreq;
710-}
711-
712-static void handle_buffered_io(void *opaque)
713-{
714- XenIOState *state = opaque;
715-
716- if (handle_buffered_iopage(state)) {
717- timer_mod(state->buffered_io_timer,
718- BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
719- } else {
720- timer_del(state->buffered_io_timer);
721- xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
722- }
723-}
724-
725-static void cpu_handle_ioreq(void *opaque)
726-{
727- XenIOState *state = opaque;
728- ioreq_t *req = cpu_get_ioreq(state);
729-
730- handle_buffered_iopage(state);
731- if (req) {
732- ioreq_t copy = *req;
733-
734- xen_rmb();
735- handle_ioreq(state, &copy);
736- req->data = copy.data;
737-
738- if (req->state != STATE_IOREQ_INPROCESS) {
739- fprintf(stderr, "Badness in I/O request ... not in service?!: "
740- "%x, ptr: %x, port: %"PRIx64", "
741- "data: %"PRIx64", count: %u, size: %u, type: %u\n",
742- req->state, req->data_is_ptr, req->addr,
743- req->data, req->count, req->size, req->type);
744- destroy_hvm_domain(false);
745- return;
746- }
747-
748- xen_wmb(); /* Update ioreq contents /then/ update state. */
749-
750- /*
751- * We do this before we send the response so that the tools
752- * have the opportunity to pick up on the reset before the
753- * guest resumes and does a hlt with interrupts disabled which
754- * causes Xen to powerdown the domain.
755- */
756- if (runstate_is_running()) {
757- ShutdownCause request;
758-
759- if (qemu_shutdown_requested_get()) {
760- destroy_hvm_domain(false);
761- }
762- request = qemu_reset_requested_get();
763- if (request) {
764- qemu_system_reset(request);
765- destroy_hvm_domain(true);
766- }
767- }
768-
769- req->state = STATE_IORESP_READY;
770- xenevtchn_notify(state->xce_handle,
771- state->ioreq_local_port[state->send_vcpu]);
772- }
773-}
774-
775-static void xen_main_loop_prepare(XenIOState *state)
776-{
777- int evtchn_fd = -1;
778-
779- if (state->xce_handle != NULL) {
780- evtchn_fd = xenevtchn_fd(state->xce_handle);
781- }
782-
783- state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
784- state);
785-
786- if (evtchn_fd != -1) {
787- CPUState *cpu_state;
788-
789- DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
790- CPU_FOREACH(cpu_state) {
791- DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
792- __func__, cpu_state->cpu_index, cpu_state);
793- state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
794- }
795- qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
796- }
797-}
798-
799-
800-static void xen_hvm_change_state_handler(void *opaque, bool running,
801- RunState rstate)
802-{
803- XenIOState *state = opaque;
804-
805- if (running) {
806- xen_main_loop_prepare(state);
807- }
808-
809- xen_set_ioreq_server_state(xen_domid,
810- state->ioservid,
811- (rstate == RUN_STATE_RUNNING));
812-}
813-
814-static void xen_exit_notifier(Notifier *n, void *data)
815-{
816- XenIOState *state = container_of(n, XenIOState, exit);
817-
818- xen_destroy_ioreq_server(xen_domid, state->ioservid);
819- if (state->fres != NULL) {
820- xenforeignmemory_unmap_resource(xen_fmem, state->fres);
821- }
822-
823- xenevtchn_close(state->xce_handle);
824- xs_daemon_close(state->xenstore);
825-}
826-
827 #ifdef XEN_COMPAT_PHYSMAP
828 static void xen_read_physmap(XenIOState *state)
829 {
830@@ -1273,178 +571,17 @@ static void xen_wakeup_notifier(Notifier *notifier, void *data)
831 xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
832 }
833
834-static int xen_map_ioreq_server(XenIOState *state)
835-{
836- void *addr = NULL;
837- xen_pfn_t ioreq_pfn;
838- xen_pfn_t bufioreq_pfn;
839- evtchn_port_t bufioreq_evtchn;
840- int rc;
841-
842- /*
843- * Attempt to map using the resource API and fall back to normal
844- * foreign mapping if this is not supported.
845- */
846- QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
847- QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
848- state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
849- XENMEM_resource_ioreq_server,
850- state->ioservid, 0, 2,
851- &addr,
852- PROT_READ | PROT_WRITE, 0);
853- if (state->fres != NULL) {
854- trace_xen_map_resource_ioreq(state->ioservid, addr);
855- state->buffered_io_page = addr;
856- state->shared_page = addr + TARGET_PAGE_SIZE;
857- } else if (errno != EOPNOTSUPP) {
858- error_report("failed to map ioreq server resources: error %d handle=%p",
859- errno, xen_xc);
860- return -1;
861- }
862-
863- rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
864- (state->shared_page == NULL) ?
865- &ioreq_pfn : NULL,
866- (state->buffered_io_page == NULL) ?
867- &bufioreq_pfn : NULL,
868- &bufioreq_evtchn);
869- if (rc < 0) {
870- error_report("failed to get ioreq server info: error %d handle=%p",
871- errno, xen_xc);
872- return rc;
873- }
874-
875- if (state->shared_page == NULL) {
876- DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
877-
878- state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
879- PROT_READ | PROT_WRITE,
880- 1, &ioreq_pfn, NULL);
881- if (state->shared_page == NULL) {
882- error_report("map shared IO page returned error %d handle=%p",
883- errno, xen_xc);
884- }
885- }
886-
887- if (state->buffered_io_page == NULL) {
888- DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
889-
890- state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
891- PROT_READ | PROT_WRITE,
892- 1, &bufioreq_pfn,
893- NULL);
894- if (state->buffered_io_page == NULL) {
895- error_report("map buffered IO page returned error %d", errno);
896- return -1;
897- }
898- }
899-
900- if (state->shared_page == NULL || state->buffered_io_page == NULL) {
901- return -1;
902- }
903-
904- DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
905-
906- state->bufioreq_remote_port = bufioreq_evtchn;
907-
908- return 0;
909-}
910-
911 void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
912 {
913 MachineState *ms = MACHINE(pcms);
914 unsigned int max_cpus = ms->smp.max_cpus;
915- int i, rc;
916+ int rc;
917 xen_pfn_t ioreq_pfn;
918 XenIOState *state;
919
920 state = g_new0(XenIOState, 1);
921
922- state->xce_handle = xenevtchn_open(NULL, 0);
923- if (state->xce_handle == NULL) {
924- perror("xen: event channel open");
925- goto err;
926- }
927-
928- state->xenstore = xs_daemon_open();
929- if (state->xenstore == NULL) {
930- perror("xen: xenstore open");
931- goto err;
932- }
933-
934- xen_create_ioreq_server(xen_domid, &state->ioservid);
935-
936- state->exit.notify = xen_exit_notifier;
937- qemu_add_exit_notifier(&state->exit);
938-
939- /*
940- * Register wake-up support in QMP query-current-machine API
941- */
942- qemu_register_wakeup_support();
943-
944- rc = xen_map_ioreq_server(state);
945- if (rc < 0) {
946- goto err;
947- }
948-
949- /* Note: cpus is empty at this point in init */
950- state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus);
951-
952- rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
953- if (rc < 0) {
954- error_report("failed to enable ioreq server info: error %d handle=%p",
955- errno, xen_xc);
956- goto err;
957- }
958-
959- state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus);
960-
961- /* FIXME: how about if we overflow the page here? */
962- for (i = 0; i < max_cpus; i++) {
963- rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
964- xen_vcpu_eport(state->shared_page, i));
965- if (rc == -1) {
966- error_report("shared evtchn %d bind error %d", i, errno);
967- goto err;
968- }
969- state->ioreq_local_port[i] = rc;
970- }
971-
972- rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
973- state->bufioreq_remote_port);
974- if (rc == -1) {
975- error_report("buffered evtchn bind error %d", errno);
976- goto err;
977- }
978- state->bufioreq_local_port = rc;
979-
980- /* Init RAM management */
981-#ifdef XEN_COMPAT_PHYSMAP
982- xen_map_cache_init(xen_phys_offset_to_gaddr, state);
983-#else
984- xen_map_cache_init(NULL, state);
985-#endif
986-
987- qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
988-
989- state->memory_listener = xen_memory_listener;
990- memory_listener_register(&state->memory_listener, &address_space_memory);
991-
992- state->io_listener = xen_io_listener;
993- memory_listener_register(&state->io_listener, &address_space_io);
994-
995- state->device_listener = xen_device_listener;
996- QLIST_INIT(&state->dev_list);
997- device_listener_register(&state->device_listener);
998-
999- xen_bus_init();
1000-
1001- /* Initialize backend core & drivers */
1002- if (xen_be_init() != 0) {
1003- error_report("xen backend core setup failed");
1004- goto err;
1005- }
1006- xen_be_register_common();
1007+ xen_register_ioreq(state, max_cpus, xen_memory_listener);
1008
1009 QLIST_INIT(&xen_physmap);
1010 xen_read_physmap(state);
1011@@ -1484,59 +621,11 @@ err:
1012 exit(1);
1013 }
1014
1015-void destroy_hvm_domain(bool reboot)
1016-{
1017- xc_interface *xc_handle;
1018- int sts;
1019- int rc;
1020-
1021- unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;
1022-
1023- if (xen_dmod) {
1024- rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
1025- if (!rc) {
1026- return;
1027- }
1028- if (errno != ENOTTY /* old Xen */) {
1029- perror("xendevicemodel_shutdown failed");
1030- }
1031- /* well, try the old thing then */
1032- }
1033-
1034- xc_handle = xc_interface_open(0, 0, 0);
1035- if (xc_handle == NULL) {
1036- fprintf(stderr, "Cannot acquire xenctrl handle\n");
1037- } else {
1038- sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
1039- if (sts != 0) {
1040- fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
1041- "sts %d, %s\n", reboot ? "reboot" : "poweroff",
1042- sts, strerror(errno));
1043- } else {
1044- fprintf(stderr, "Issued domain %d %s\n", xen_domid,
1045- reboot ? "reboot" : "poweroff");
1046- }
1047- xc_interface_close(xc_handle);
1048- }
1049-}
1050-
1051 void xen_register_framebuffer(MemoryRegion *mr)
1052 {
1053 framebuffer = mr;
1054 }
1055
1056-void xen_shutdown_fatal_error(const char *fmt, ...)
1057-{
1058- va_list ap;
1059-
1060- va_start(ap, fmt);
1061- vfprintf(stderr, fmt, ap);
1062- va_end(ap);
1063- fprintf(stderr, "Will destroy the domain.\n");
1064- /* destroy the domain */
1065- qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
1066-}
1067-
1068 void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
1069 {
1070 if (unlikely(xen_in_migration)) {
1071diff --git a/hw/xen/meson.build b/hw/xen/meson.build
1072index 19d0637c46..008e036d63 100644
1073--- a/hw/xen/meson.build
1074+++ b/hw/xen/meson.build
1075@@ -25,4 +25,7 @@ specific_ss.add_all(when: ['CONFIG_XEN', xen], if_true: xen_specific_ss)
1076
1077 xen_ss = ss.source_set()
1078
1079-xen_ss.add(when: 'CONFIG_XEN', if_true: files('xen-mapcache.c'))
1080+xen_ss.add(when: 'CONFIG_XEN', if_true: files(
1081+ 'xen-mapcache.c',
1082+ 'xen-hvm-common.c',
1083+))
1084diff --git a/hw/xen/trace-events b/hw/xen/trace-events
1085index 2c8f238f42..02ca1183da 100644
1086--- a/hw/xen/trace-events
1087+++ b/hw/xen/trace-events
1088@@ -42,6 +42,20 @@ xs_node_vscanf(char *path, char *value) "%s %s"
1089 xs_node_watch(char *path) "%s"
1090 xs_node_unwatch(char *path) "%s"
1091
1092+# xen-hvm.c
1093+xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: 0x%lx, size 0x%lx"
1094+xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "0x%"PRIx64" size 0x%lx, log_dirty %i"
1095+handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
1096+handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
1097+handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
1098+cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
1099+cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
1100+cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=0x%"PRIx64" port=0x%"PRIx64" size=%d"
1101+cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=0x%"PRIx64" data=0x%"PRIx64" count=%d size=%d"
1102+xen_map_resource_ioreq(uint32_t id, void *addr) "id: %u addr: %p"
1103+cpu_ioreq_config_read(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
1104+cpu_ioreq_config_write(void *req, uint32_t sbdf, uint32_t reg, uint32_t size, uint32_t data) "I/O=%p sbdf=0x%x reg=%u size=%u data=0x%x"
1105+
1106 # xen-mapcache.c
1107 xen_map_cache(uint64_t phys_addr) "want 0x%"PRIx64
1108 xen_remap_bucket(uint64_t index) "index 0x%"PRIx64
1109diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
1110new file mode 100644
1111index 0000000000..67f76f6010
1112--- /dev/null
1113+++ b/hw/xen/xen-hvm-common.c
1114@@ -0,0 +1,861 @@
1115+#include "qemu/osdep.h"
1116+#include "qemu/units.h"
1117+#include "qapi/error.h"
1118+#include "trace.h"
1119+
1120+#include "hw/pci/pci_host.h"
1121+#include "hw/xen/xen-hvm-common.h"
1122+#include "hw/xen/xen-legacy-backend.h"
1123+#include "hw/xen/xen-bus.h"
1124+#include "hw/boards.h"
1125+#include "hw/xen/arch_hvm.h"
1126+
1127+MemoryRegion ram_memory;
1128+
1129+MemoryListener xen_io_listener = {
1130+ .name = "xen-io",
1131+ .region_add = xen_io_add,
1132+ .region_del = xen_io_del,
1133+ .priority = 10,
1134+};
1135+
1136+DeviceListener xen_device_listener = {
1137+ .realize = xen_device_realize,
1138+ .unrealize = xen_device_unrealize,
1139+};
1140+
1141+static void xen_set_memory(struct MemoryListener *listener,
1142+ MemoryRegionSection *section,
1143+ bool add)
1144+{
1145+ XenIOState *state = container_of(listener, XenIOState, memory_listener);
1146+
1147+ if (section->mr == &ram_memory) {
1148+ return;
1149+ } else {
1150+ if (add) {
1151+ xen_map_memory_section(xen_domid, state->ioservid,
1152+ section);
1153+ } else {
1154+ xen_unmap_memory_section(xen_domid, state->ioservid,
1155+ section);
1156+ }
1157+ }
1158+ arch_xen_set_memory(state, section, add);
1159+}
1160+
1161+void xen_region_add(MemoryListener *listener,
1162+ MemoryRegionSection *section)
1163+{
1164+ memory_region_ref(section->mr);
1165+ xen_set_memory(listener, section, true);
1166+}
1167+
1168+void xen_region_del(MemoryListener *listener,
1169+ MemoryRegionSection *section)
1170+{
1171+ xen_set_memory(listener, section, false);
1172+ memory_region_unref(section->mr);
1173+}
1174+
1175+void xen_io_add(MemoryListener *listener,
1176+ MemoryRegionSection *section)
1177+{
1178+ XenIOState *state = container_of(listener, XenIOState, io_listener);
1179+ MemoryRegion *mr = section->mr;
1180+
1181+ if (mr->ops == &unassigned_io_ops) {
1182+ return;
1183+ }
1184+
1185+ memory_region_ref(mr);
1186+
1187+ xen_map_io_section(xen_domid, state->ioservid, section);
1188+}
1189+
1190+void xen_io_del(MemoryListener *listener,
1191+ MemoryRegionSection *section)
1192+{
1193+ XenIOState *state = container_of(listener, XenIOState, io_listener);
1194+ MemoryRegion *mr = section->mr;
1195+
1196+ if (mr->ops == &unassigned_io_ops) {
1197+ return;
1198+ }
1199+
1200+ xen_unmap_io_section(xen_domid, state->ioservid, section);
1201+
1202+ memory_region_unref(mr);
1203+}
1204+
1205+void xen_device_realize(DeviceListener *listener,
1206+ DeviceState *dev)
1207+{
1208+ XenIOState *state = container_of(listener, XenIOState, device_listener);
1209+
1210+ if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
1211+ PCIDevice *pci_dev = PCI_DEVICE(dev);
1212+ XenPciDevice *xendev = g_new(XenPciDevice, 1);
1213+
1214+ xendev->pci_dev = pci_dev;
1215+ xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
1216+ pci_dev->devfn);
1217+ QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);
1218+
1219+ xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
1220+ }
1221+}
1222+
1223+void xen_device_unrealize(DeviceListener *listener,
1224+ DeviceState *dev)
1225+{
1226+ XenIOState *state = container_of(listener, XenIOState, device_listener);
1227+
1228+ if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
1229+ PCIDevice *pci_dev = PCI_DEVICE(dev);
1230+ XenPciDevice *xendev, *next;
1231+
1232+ xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);
1233+
1234+ QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
1235+ if (xendev->pci_dev == pci_dev) {
1236+ QLIST_REMOVE(xendev, entry);
1237+ g_free(xendev);
1238+ break;
1239+ }
1240+ }
1241+ }
1242+}
1243+
1244+/* get the ioreq packets from share mem */
1245+static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
1246+{
1247+ ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);
1248+
1249+ if (req->state != STATE_IOREQ_READY) {
1250+ DPRINTF("I/O request not ready: "
1251+ "%x, ptr: %x, port: %"PRIx64", "
1252+ "data: %"PRIx64", count: %u, size: %u\n",
1253+ req->state, req->data_is_ptr, req->addr,
1254+ req->data, req->count, req->size);
1255+ return NULL;
1256+ }
1257+
1258+ xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */
1259+
1260+ req->state = STATE_IOREQ_INPROCESS;
1261+ return req;
1262+}
1263+
1264+/* use poll to get the port notification */
1265+/* ioreq_vec--out,the */
1266+/* retval--the number of ioreq packet */
1267+static ioreq_t *cpu_get_ioreq(XenIOState *state)
1268+{
1269+ MachineState *ms = MACHINE(qdev_get_machine());
1270+ unsigned int max_cpus = ms->smp.max_cpus;
1271+ int i;
1272+ evtchn_port_t port;
1273+
1274+ port = xenevtchn_pending(state->xce_handle);
1275+ if (port == state->bufioreq_local_port) {
1276+ timer_mod(state->buffered_io_timer,
1277+ BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
1278+ return NULL;
1279+ }
1280+
1281+ if (port != -1) {
1282+ for (i = 0; i < max_cpus; i++) {
1283+ if (state->ioreq_local_port[i] == port) {
1284+ break;
1285+ }
1286+ }
1287+
1288+ if (i == max_cpus) {
1289+ hw_error("Fatal error while trying to get io event!\n");
1290+ }
1291+
1292+ /* unmask the wanted port again */
1293+ xenevtchn_unmask(state->xce_handle, port);
1294+
1295+ /* get the io packet from shared memory */
1296+ state->send_vcpu = i;
1297+ return cpu_get_ioreq_from_shared_memory(state, i);
1298+ }
1299+
1300+ /* read error or read nothing */
1301+ return NULL;
1302+}
1303+
1304+static uint32_t do_inp(uint32_t addr, unsigned long size)
1305+{
1306+ switch (size) {
1307+ case 1:
1308+ return cpu_inb(addr);
1309+ case 2:
1310+ return cpu_inw(addr);
1311+ case 4:
1312+ return cpu_inl(addr);
1313+ default:
1314+ hw_error("inp: bad size: %04x %lx", addr, size);
1315+ }
1316+}
1317+
1318+static void do_outp(uint32_t addr,
1319+ unsigned long size, uint32_t val)
1320+{
1321+ switch (size) {
1322+ case 1:
1323+ return cpu_outb(addr, val);
1324+ case 2:
1325+ return cpu_outw(addr, val);
1326+ case 4:
1327+ return cpu_outl(addr, val);
1328+ default:
1329+ hw_error("outp: bad size: %04x %lx", addr, size);
1330+ }
1331+}
1332+
1333+/*
1334+ * Helper functions which read/write an object from/to physical guest
1335+ * memory, as part of the implementation of an ioreq.
1336+ *
1337+ * Equivalent to
1338+ * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
1339+ * val, req->size, 0/1)
1340+ * except without the integer overflow problems.
1341+ */
1342+static void rw_phys_req_item(hwaddr addr,
1343+ ioreq_t *req, uint32_t i, void *val, int rw)
1344+{
1345+ /* Do everything unsigned so overflow just results in a truncated result
1346+ * and accesses to undesired parts of guest memory, which is up
1347+ * to the guest */
1348+ hwaddr offset = (hwaddr)req->size * i;
1349+ if (req->df) {
1350+ addr -= offset;
1351+ } else {
1352+ addr += offset;
1353+ }
1354+ cpu_physical_memory_rw(addr, val, req->size, rw);
1355+}
1356+
1357+static inline void read_phys_req_item(hwaddr addr,
1358+ ioreq_t *req, uint32_t i, void *val)
1359+{
1360+ rw_phys_req_item(addr, req, i, val, 0);
1361+}
1362+static inline void write_phys_req_item(hwaddr addr,
1363+ ioreq_t *req, uint32_t i, void *val)
1364+{
1365+ rw_phys_req_item(addr, req, i, val, 1);
1366+}
1367+
1368+
1369+void cpu_ioreq_pio(ioreq_t *req)
1370+{
1371+ uint32_t i;
1372+
1373+ trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
1374+ req->data, req->count, req->size);
1375+
1376+ if (req->size > sizeof(uint32_t)) {
1377+ hw_error("PIO: bad size (%u)", req->size);
1378+ }
1379+
1380+ if (req->dir == IOREQ_READ) {
1381+ if (!req->data_is_ptr) {
1382+ req->data = do_inp(req->addr, req->size);
1383+ trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
1384+ req->size);
1385+ } else {
1386+ uint32_t tmp;
1387+
1388+ for (i = 0; i < req->count; i++) {
1389+ tmp = do_inp(req->addr, req->size);
1390+ write_phys_req_item(req->data, req, i, &tmp);
1391+ }
1392+ }
1393+ } else if (req->dir == IOREQ_WRITE) {
1394+ if (!req->data_is_ptr) {
1395+ trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
1396+ req->size);
1397+ do_outp(req->addr, req->size, req->data);
1398+ } else {
1399+ for (i = 0; i < req->count; i++) {
1400+ uint32_t tmp = 0;
1401+
1402+ read_phys_req_item(req->data, req, i, &tmp);
1403+ do_outp(req->addr, req->size, tmp);
1404+ }
1405+ }
1406+ }
1407+}
1408+
1409+static void cpu_ioreq_move(ioreq_t *req)
1410+{
1411+ uint32_t i;
1412+
1413+ trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
1414+ req->data, req->count, req->size);
1415+
1416+ if (req->size > sizeof(req->data)) {
1417+ hw_error("MMIO: bad size (%u)", req->size);
1418+ }
1419+
1420+ if (!req->data_is_ptr) {
1421+ if (req->dir == IOREQ_READ) {
1422+ for (i = 0; i < req->count; i++) {
1423+ read_phys_req_item(req->addr, req, i, &req->data);
1424+ }
1425+ } else if (req->dir == IOREQ_WRITE) {
1426+ for (i = 0; i < req->count; i++) {
1427+ write_phys_req_item(req->addr, req, i, &req->data);
1428+ }
1429+ }
1430+ } else {
1431+ uint64_t tmp;
1432+
1433+ if (req->dir == IOREQ_READ) {
1434+ for (i = 0; i < req->count; i++) {
1435+ read_phys_req_item(req->addr, req, i, &tmp);
1436+ write_phys_req_item(req->data, req, i, &tmp);
1437+ }
1438+ } else if (req->dir == IOREQ_WRITE) {
1439+ for (i = 0; i < req->count; i++) {
1440+ read_phys_req_item(req->data, req, i, &tmp);
1441+ write_phys_req_item(req->addr, req, i, &tmp);
1442+ }
1443+ }
1444+ }
1445+}
1446+
1447+static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
1448+{
1449+ uint32_t sbdf = req->addr >> 32;
1450+ uint32_t reg = req->addr;
1451+ XenPciDevice *xendev;
1452+
1453+ if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
1454+ req->size != sizeof(uint32_t)) {
1455+ hw_error("PCI config access: bad size (%u)", req->size);
1456+ }
1457+
1458+ if (req->count != 1) {
1459+ hw_error("PCI config access: bad count (%u)", req->count);
1460+ }
1461+
1462+ QLIST_FOREACH(xendev, &state->dev_list, entry) {
1463+ if (xendev->sbdf != sbdf) {
1464+ continue;
1465+ }
1466+
1467+ if (!req->data_is_ptr) {
1468+ if (req->dir == IOREQ_READ) {
1469+ req->data = pci_host_config_read_common(
1470+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
1471+ req->size);
1472+ trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
1473+ req->size, req->data);
1474+ } else if (req->dir == IOREQ_WRITE) {
1475+ trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
1476+ req->size, req->data);
1477+ pci_host_config_write_common(
1478+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
1479+ req->data, req->size);
1480+ }
1481+ } else {
1482+ uint32_t tmp;
1483+
1484+ if (req->dir == IOREQ_READ) {
1485+ tmp = pci_host_config_read_common(
1486+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
1487+ req->size);
1488+ trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
1489+ req->size, tmp);
1490+ write_phys_req_item(req->data, req, 0, &tmp);
1491+ } else if (req->dir == IOREQ_WRITE) {
1492+ read_phys_req_item(req->data, req, 0, &tmp);
1493+ trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
1494+ req->size, tmp);
1495+ pci_host_config_write_common(
1496+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
1497+ tmp, req->size);
1498+ }
1499+ }
1500+ }
1501+}
1502+
1503+static void handle_ioreq(XenIOState *state, ioreq_t *req)
1504+{
1505+ trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
1506+ req->addr, req->data, req->count, req->size);
1507+
1508+ if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
1509+ (req->size < sizeof (target_ulong))) {
1510+ req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
1511+ }
1512+
1513+ if (req->dir == IOREQ_WRITE)
1514+ trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
1515+ req->addr, req->data, req->count, req->size);
1516+
1517+ switch (req->type) {
1518+ case IOREQ_TYPE_PIO:
1519+ cpu_ioreq_pio(req);
1520+ break;
1521+ case IOREQ_TYPE_COPY:
1522+ cpu_ioreq_move(req);
1523+ break;
1524+ case IOREQ_TYPE_TIMEOFFSET:
1525+ break;
1526+ case IOREQ_TYPE_INVALIDATE:
1527+ xen_invalidate_map_cache();
1528+ break;
1529+ case IOREQ_TYPE_PCI_CONFIG:
1530+ cpu_ioreq_config(state, req);
1531+ break;
1532+ default:
1533+ arch_handle_ioreq(state, req);
1534+ }
1535+ if (req->dir == IOREQ_READ) {
1536+ trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
1537+ req->addr, req->data, req->count, req->size);
1538+ }
1539+}
1540+
1541+static bool handle_buffered_iopage(XenIOState *state)
1542+{
1543+ buffered_iopage_t *buf_page = state->buffered_io_page;
1544+ buf_ioreq_t *buf_req = NULL;
1545+ bool handled_ioreq = false;
1546+ ioreq_t req;
1547+ int qw;
1548+
1549+ if (!buf_page) {
1550+ return 0;
1551+ }
1552+
1553+ memset(&req, 0x00, sizeof(req));
1554+ req.state = STATE_IOREQ_READY;
1555+ req.count = 1;
1556+ req.dir = IOREQ_WRITE;
1557+
1558+ for (;;) {
1559+ uint32_t rdptr = buf_page->read_pointer, wrptr;
1560+
1561+ xen_rmb();
1562+ wrptr = buf_page->write_pointer;
1563+ xen_rmb();
1564+ if (rdptr != buf_page->read_pointer) {
1565+ continue;
1566+ }
1567+ if (rdptr == wrptr) {
1568+ break;
1569+ }
1570+ buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
1571+ req.size = 1U << buf_req->size;
1572+ req.addr = buf_req->addr;
1573+ req.data = buf_req->data;
1574+ req.type = buf_req->type;
1575+ xen_rmb();
1576+ qw = (req.size == 8);
1577+ if (qw) {
1578+ if (rdptr + 1 == wrptr) {
1579+ hw_error("Incomplete quad word buffered ioreq");
1580+ }
1581+ buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
1582+ IOREQ_BUFFER_SLOT_NUM];
1583+ req.data |= ((uint64_t)buf_req->data) << 32;
1584+ xen_rmb();
1585+ }
1586+
1587+ handle_ioreq(state, &req);
1588+
1589+ /* Only req.data may get updated by handle_ioreq(), albeit even that
1590+ * should not happen as such data would never make it to the guest (we
1591+ * can only usefully see writes here after all).
1592+ */
1593+ assert(req.state == STATE_IOREQ_READY);
1594+ assert(req.count == 1);
1595+ assert(req.dir == IOREQ_WRITE);
1596+ assert(!req.data_is_ptr);
1597+
1598+ qatomic_add(&buf_page->read_pointer, qw + 1);
1599+ }
1600+
1601+ return handled_ioreq;
1602+}
1603+
1604+static void handle_buffered_io(void *opaque)
1605+{
1606+ XenIOState *state = opaque;
1607+
1608+ if (handle_buffered_iopage(state)) {
1609+ timer_mod(state->buffered_io_timer,
1610+ BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
1611+ } else {
1612+ timer_del(state->buffered_io_timer);
1613+ xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
1614+ }
1615+}
1616+
1617+static void cpu_handle_ioreq(void *opaque)
1618+{
1619+ XenIOState *state = opaque;
1620+ ioreq_t *req = cpu_get_ioreq(state);
1621+
1622+ handle_buffered_iopage(state);
1623+ if (req) {
1624+ ioreq_t copy = *req;
1625+
1626+ xen_rmb();
1627+ handle_ioreq(state, &copy);
1628+ req->data = copy.data;
1629+
1630+ if (req->state != STATE_IOREQ_INPROCESS) {
1631+ fprintf(stderr, "Badness in I/O request ... not in service?!: "
1632+ "%x, ptr: %x, port: %"PRIx64", "
1633+ "data: %"PRIx64", count: %u, size: %u, type: %u\n",
1634+ req->state, req->data_is_ptr, req->addr,
1635+ req->data, req->count, req->size, req->type);
1636+ destroy_hvm_domain(false);
1637+ return;
1638+ }
1639+
1640+ xen_wmb(); /* Update ioreq contents /then/ update state. */
1641+
1642+ /*
1643+ * We do this before we send the response so that the tools
1644+ * have the opportunity to pick up on the reset before the
1645+ * guest resumes and does a hlt with interrupts disabled which
1646+ * causes Xen to powerdown the domain.
1647+ */
1648+ if (runstate_is_running()) {
1649+ ShutdownCause request;
1650+
1651+ if (qemu_shutdown_requested_get()) {
1652+ destroy_hvm_domain(false);
1653+ }
1654+ request = qemu_reset_requested_get();
1655+ if (request) {
1656+ qemu_system_reset(request);
1657+ destroy_hvm_domain(true);
1658+ }
1659+ }
1660+
1661+ req->state = STATE_IORESP_READY;
1662+ xenevtchn_notify(state->xce_handle,
1663+ state->ioreq_local_port[state->send_vcpu]);
1664+ }
1665+}
1666+
1667+static void xen_main_loop_prepare(XenIOState *state)
1668+{
1669+ int evtchn_fd = -1;
1670+
1671+ if (state->xce_handle != NULL) {
1672+ evtchn_fd = xenevtchn_fd(state->xce_handle);
1673+ }
1674+
1675+ state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
1676+ state);
1677+
1678+ if (evtchn_fd != -1) {
1679+ CPUState *cpu_state;
1680+
1681+ DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
1682+ CPU_FOREACH(cpu_state) {
1683+ DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
1684+ __func__, cpu_state->cpu_index, cpu_state);
1685+ state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
1686+ }
1687+ qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
1688+ }
1689+}
1690+
1691+
1692+void xen_hvm_change_state_handler(void *opaque, bool running,
1693+ RunState rstate)
1694+{
1695+ XenIOState *state = opaque;
1696+
1697+ if (running) {
1698+ xen_main_loop_prepare(state);
1699+ }
1700+
1701+ xen_set_ioreq_server_state(xen_domid,
1702+ state->ioservid,
1703+ (rstate == RUN_STATE_RUNNING));
1704+}
1705+
1706+void xen_exit_notifier(Notifier *n, void *data)
1707+{
1708+ XenIOState *state = container_of(n, XenIOState, exit);
1709+
1710+ xen_destroy_ioreq_server(xen_domid, state->ioservid);
1711+ if (state->fres != NULL) {
1712+ xenforeignmemory_unmap_resource(xen_fmem, state->fres);
1713+ }
1714+
1715+ xenevtchn_close(state->xce_handle);
1716+ xs_daemon_close(state->xenstore);
1717+}
1718+
1719+static int xen_map_ioreq_server(XenIOState *state)
1720+{
1721+ void *addr = NULL;
1722+ xen_pfn_t ioreq_pfn;
1723+ xen_pfn_t bufioreq_pfn;
1724+ evtchn_port_t bufioreq_evtchn;
1725+ int rc;
1726+
1727+ /*
1728+ * Attempt to map using the resource API and fall back to normal
1729+ * foreign mapping if this is not supported.
1730+ */
1731+ QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
1732+ QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
1733+ state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
1734+ XENMEM_resource_ioreq_server,
1735+ state->ioservid, 0, 2,
1736+ &addr,
1737+ PROT_READ | PROT_WRITE, 0);
1738+ if (state->fres != NULL) {
1739+ trace_xen_map_resource_ioreq(state->ioservid, addr);
1740+ state->buffered_io_page = addr;
1741+ state->shared_page = addr + TARGET_PAGE_SIZE;
1742+ } else if (errno != EOPNOTSUPP) {
1743+ error_report("failed to map ioreq server resources: error %d handle=%p",
1744+ errno, xen_xc);
1745+ return -1;
1746+ }
1747+
1748+ rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
1749+ (state->shared_page == NULL) ?
1750+ &ioreq_pfn : NULL,
1751+ (state->buffered_io_page == NULL) ?
1752+ &bufioreq_pfn : NULL,
1753+ &bufioreq_evtchn);
1754+ if (rc < 0) {
1755+ error_report("failed to get ioreq server info: error %d handle=%p",
1756+ errno, xen_xc);
1757+ return rc;
1758+ }
1759+
1760+ if (state->shared_page == NULL) {
1761+ DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
1762+
1763+ state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
1764+ PROT_READ | PROT_WRITE,
1765+ 1, &ioreq_pfn, NULL);
1766+ if (state->shared_page == NULL) {
1767+ error_report("map shared IO page returned error %d handle=%p",
1768+ errno, xen_xc);
1769+ }
1770+ }
1771+
1772+ if (state->buffered_io_page == NULL) {
1773+ DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
1774+
1775+ state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
1776+ PROT_READ | PROT_WRITE,
1777+ 1, &bufioreq_pfn,
1778+ NULL);
1779+ if (state->buffered_io_page == NULL) {
1780+ error_report("map buffered IO page returned error %d", errno);
1781+ return -1;
1782+ }
1783+ }
1784+
1785+ if (state->shared_page == NULL || state->buffered_io_page == NULL) {
1786+ return -1;
1787+ }
1788+
1789+ DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
1790+
1791+ state->bufioreq_remote_port = bufioreq_evtchn;
1792+
1793+ return 0;
1794+}
1795+
1796+void xen_shutdown_fatal_error(const char *fmt, ...)
1797+{
1798+ va_list ap;
1799+
1800+ va_start(ap, fmt);
1801+ vfprintf(stderr, fmt, ap);
1802+ va_end(ap);
1803+ fprintf(stderr, "Will destroy the domain.\n");
1804+ /* destroy the domain */
1805+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
1806+}
1807+
1808+void destroy_hvm_domain(bool reboot)
1809+{
1810+ xc_interface *xc_handle;
1811+ int sts;
1812+ int rc;
1813+
1814+ unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;
1815+
1816+ if (xen_dmod) {
1817+ rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
1818+ if (!rc) {
1819+ return;
1820+ }
1821+ if (errno != ENOTTY /* old Xen */) {
1822+ perror("xendevicemodel_shutdown failed");
1823+ }
1824+ /* well, try the old thing then */
1825+ }
1826+
1827+ xc_handle = xc_interface_open(0, 0, 0);
1828+ if (xc_handle == NULL) {
1829+ fprintf(stderr, "Cannot acquire xenctrl handle\n");
1830+ } else {
1831+ sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
1832+ if (sts != 0) {
1833+ fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
1834+ "sts %d, %s\n", reboot ? "reboot" : "poweroff",
1835+ sts, strerror(errno));
1836+ } else {
1837+ fprintf(stderr, "Issued domain %d %s\n", xen_domid,
1838+ reboot ? "reboot" : "poweroff");
1839+ }
1840+ xc_interface_close(xc_handle);
1841+ }
1842+}
1843+
1844+void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
1845+ Error **errp)
1846+{
1847+ unsigned long nr_pfn;
1848+ xen_pfn_t *pfn_list;
1849+ int i;
1850+
1851+ if (runstate_check(RUN_STATE_INMIGRATE)) {
1852+ /* RAM already populated in Xen */
1853+ fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
1854+ " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
1855+ __func__, size, ram_addr);
1856+ return;
1857+ }
1858+
1859+ if (mr == &ram_memory) {
1860+ return;
1861+ }
1862+
1863+ trace_xen_ram_alloc(ram_addr, size);
1864+
1865+ nr_pfn = size >> TARGET_PAGE_BITS;
1866+ pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);
1867+
1868+ for (i = 0; i < nr_pfn; i++) {
1869+ pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
1870+ }
1871+
1872+ if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
1873+ error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
1874+ ram_addr);
1875+ }
1876+
1877+ g_free(pfn_list);
1878+}
1879+
1880+void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
1881+ MemoryListener xen_memory_listener)
1882+{
1883+ int i, rc;
1884+
1885+ state->xce_handle = xenevtchn_open(NULL, 0);
1886+ if (state->xce_handle == NULL) {
1887+ perror("xen: event channel open");
1888+ goto err;
1889+ }
1890+
1891+ state->xenstore = xs_daemon_open();
1892+ if (state->xenstore == NULL) {
1893+ perror("xen: xenstore open");
1894+ goto err;
1895+ }
1896+
1897+ xen_create_ioreq_server(xen_domid, &state->ioservid);
1898+
1899+ state->exit.notify = xen_exit_notifier;
1900+ qemu_add_exit_notifier(&state->exit);
1901+
1902+ /*
1903+ * Register wake-up support in QMP query-current-machine API
1904+ */
1905+ qemu_register_wakeup_support();
1906+
1907+ rc = xen_map_ioreq_server(state);
1908+ if (rc < 0) {
1909+ goto err;
1910+ }
1911+
1912+ /* Note: cpus is empty at this point in init */
1913+ state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus);
1914+
1915+ rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
1916+ if (rc < 0) {
1917+ error_report("failed to enable ioreq server info: error %d handle=%p",
1918+ errno, xen_xc);
1919+ goto err;
1920+ }
1921+
1922+ state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus);
1923+
1924+ /* FIXME: how about if we overflow the page here? */
1925+ for (i = 0; i < max_cpus; i++) {
1926+ rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
1927+ xen_vcpu_eport(state->shared_page, i));
1928+ if (rc == -1) {
1929+ error_report("shared evtchn %d bind error %d", i, errno);
1930+ goto err;
1931+ }
1932+ state->ioreq_local_port[i] = rc;
1933+ }
1934+
1935+ rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
1936+ state->bufioreq_remote_port);
1937+ if (rc == -1) {
1938+ error_report("buffered evtchn bind error %d", errno);
1939+ goto err;
1940+ }
1941+ state->bufioreq_local_port = rc;
1942+
1943+ /* Init RAM management */
1944+#ifdef XEN_COMPAT_PHYSMAP
1945+ xen_map_cache_init(xen_phys_offset_to_gaddr, state);
1946+#else
1947+ xen_map_cache_init(NULL, state);
1948+#endif
1949+
1950+ qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
1951+
1952+ state->memory_listener = xen_memory_listener;
1953+ memory_listener_register(&state->memory_listener, &address_space_memory);
1954+
1955+ state->io_listener = xen_io_listener;
1956+ memory_listener_register(&state->io_listener, &address_space_io);
1957+
1958+ state->device_listener = xen_device_listener;
1959+ QLIST_INIT(&state->dev_list);
1960+ device_listener_register(&state->device_listener);
1961+
1962+ xen_bus_init();
1963+
1964+ /* Initialize backend core & drivers */
1965+ if (xen_be_init() != 0) {
1966+ error_report("xen backend core setup failed");
1967+ goto err;
1968+ }
1969+ xen_be_register_common();
1970+
1971+ return;
1972+err:
1973+ error_report("xen hardware virtual machine initialisation failed");
1974+ exit(1);
1975+}
1976diff --git a/include/hw/i386/xen_arch_hvm.h b/include/hw/i386/xen_arch_hvm.h
1977index 1b2c71ba4f..1000f8f543 100644
1978--- a/include/hw/i386/xen_arch_hvm.h
1979+++ b/include/hw/i386/xen_arch_hvm.h
1980@@ -2,6 +2,7 @@
1981 #define HW_XEN_ARCH_I386_HVM_H
1982
1983 #include <xen/hvm/ioreq.h>
1984+#include "hw/xen/xen-hvm-common.h"
1985
1986 void arch_handle_ioreq(XenIOState *state, ioreq_t *req);
1987 void arch_xen_set_memory(XenIOState *state,
1988diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h
1989new file mode 100644
1990index 0000000000..2979f84ee2
1991--- /dev/null
1992+++ b/include/hw/xen/xen-hvm-common.h
1993@@ -0,0 +1,98 @@
1994+#ifndef HW_XEN_HVM_COMMON_H
1995+#define HW_XEN_HVM_COMMON_H
1996+
1997+#include "qemu/osdep.h"
1998+#include "qemu/units.h"
1999+
2000+#include "cpu.h"
2001+#include "hw/pci/pci.h"
2002+#include "hw/hw.h"
2003+#include "hw/xen/xen_common.h"
2004+#include "sysemu/runstate.h"
2005+#include "sysemu/sysemu.h"
2006+#include "sysemu/xen.h"
2007+#include "sysemu/xen-mapcache.h"
2008+
2009+#include <xen/hvm/ioreq.h>
2010+
2011+extern MemoryRegion ram_memory;
2012+extern MemoryListener xen_io_listener;
2013+extern DeviceListener xen_device_listener;
2014+
2015+//#define DEBUG_XEN_HVM
2016+
2017+#ifdef DEBUG_XEN_HVM
2018+#define DPRINTF(fmt, ...) \
2019+ do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
2020+#else
2021+#define DPRINTF(fmt, ...) \
2022+ do { } while (0)
2023+#endif
2024+
2025+static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
2026+{
2027+ return shared_page->vcpu_ioreq[i].vp_eport;
2028+}
2029+static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
2030+{
2031+ return &shared_page->vcpu_ioreq[vcpu];
2032+}
2033+
2034+#define BUFFER_IO_MAX_DELAY 100
2035+
2036+typedef struct XenPhysmap {
2037+ hwaddr start_addr;
2038+ ram_addr_t size;
2039+ const char *name;
2040+ hwaddr phys_offset;
2041+
2042+ QLIST_ENTRY(XenPhysmap) list;
2043+} XenPhysmap;
2044+
2045+typedef struct XenPciDevice {
2046+ PCIDevice *pci_dev;
2047+ uint32_t sbdf;
2048+ QLIST_ENTRY(XenPciDevice) entry;
2049+} XenPciDevice;
2050+
2051+typedef struct XenIOState {
2052+ ioservid_t ioservid;
2053+ shared_iopage_t *shared_page;
2054+ buffered_iopage_t *buffered_io_page;
2055+ xenforeignmemory_resource_handle *fres;
2056+ QEMUTimer *buffered_io_timer;
2057+ CPUState **cpu_by_vcpu_id;
2058+ /* the evtchn port for polling the notification, */
2059+ evtchn_port_t *ioreq_local_port;
2060+ /* evtchn remote and local ports for buffered io */
2061+ evtchn_port_t bufioreq_remote_port;
2062+ evtchn_port_t bufioreq_local_port;
2063+ /* the evtchn fd for polling */
2064+ xenevtchn_handle *xce_handle;
2065+ /* which vcpu we are serving */
2066+ int send_vcpu;
2067+
2068+ struct xs_handle *xenstore;
2069+ MemoryListener memory_listener;
2070+ MemoryListener io_listener;
2071+ QLIST_HEAD(, XenPciDevice) dev_list;
2072+ DeviceListener device_listener;
2073+
2074+ Notifier exit;
2075+} XenIOState;
2076+
2077+void xen_exit_notifier(Notifier *n, void *data);
2078+
2079+void xen_region_add(MemoryListener *listener, MemoryRegionSection *section);
2080+void xen_region_del(MemoryListener *listener, MemoryRegionSection *section);
2081+void xen_io_add(MemoryListener *listener, MemoryRegionSection *section);
2082+void xen_io_del(MemoryListener *listener, MemoryRegionSection *section);
2083+void xen_device_realize(DeviceListener *listener, DeviceState *dev);
2084+void xen_device_unrealize(DeviceListener *listener, DeviceState *dev);
2085+
2086+void xen_hvm_change_state_handler(void *opaque, bool running, RunState rstate);
2087+void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
2088+ MemoryListener xen_memory_listener);
2089+
2090+void cpu_ioreq_pio(ioreq_t *req);
2091+#endif /* HW_XEN_HVM_COMMON_H */
2092--
20932.17.1
2094
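For orientation, a minimal sketch of how an architecture port is expected to consume the common layer introduced above: it implements the two hooks declared in its xen_arch_hvm.h and hands its own MemoryListener to xen_register_ioreq(). This mirrors what the ARM patches later in this series do; the listener fields, hook bodies and machine-init function below are illustrative only and not part of the patch.

/* Illustrative sketch only; mirrors the later hw/arm/xen_arm.c patch. */
#include "qemu/osdep.h"
#include "hw/boards.h"
#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/arch_hvm.h"

static MemoryListener example_memory_listener = {
    .region_add = xen_region_add,     /* provided by xen-hvm-common.c */
    .region_del = xen_region_del,
    .priority = 10,
};

/* Arch hook: ioreq types the common handle_ioreq() does not handle itself. */
void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
{
    hw_error("Invalid ioreq type 0x%x\n", req->type);
}

/* Arch hook: called from the memory listener path; nothing to do here. */
void arch_xen_set_memory(XenIOState *state, MemoryRegionSection *section,
                         bool add)
{
}

static void example_machine_init(MachineState *machine)
{
    XenIOState *state = g_new0(XenIOState, 1);

    /* Creates the IOREQ server, maps its pages, binds the event channels
     * and registers the listener, which is passed by value. */
    xen_register_ioreq(state, machine->smp.cpus, example_memory_listener);
}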
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0007-xen-mapcache-Fix-build-on-Arm.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0007-xen-mapcache-Fix-build-on-Arm.patch
new file mode 100644
index 00000000..7ff202ff
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0007-xen-mapcache-Fix-build-on-Arm.patch
@@ -0,0 +1,37 @@
1From 2aca3ff63a5d5897cd32e0030569623f0c454f2c Mon Sep 17 00:00:00 2001
2From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
3Date: Mon, 19 Sep 2022 21:59:55 +0300
4Subject: [PATCH 7/8] xen-mapcache: Fix build on Arm
5MIME-Version: 1.0
6Content-Type: text/plain; charset=UTF-8
7Content-Transfer-Encoding: 8bit
8
9../hw/xen/xen-mapcache.c: In function ‘xen_map_grant_dyn’:
10../hw/xen/xen-mapcache.c:668:9: error: ‘refs’ may be used uninitialized
11 in this function [-Werror=maybe-uninitialized]
12 668 | g_free(refs);
13 | ^~~~~~~~~~~~
14cc1: all warnings being treated as errors
15
16Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
17Acked-by: Stefano Stabellini <stefano.stabellini@amd.com>
18---
19 hw/xen/xen-mapcache.c | 2 +-
20 1 file changed, 1 insertion(+), 1 deletion(-)
21
22diff --git a/hw/xen/xen-mapcache.c b/hw/xen/xen-mapcache.c
23index f81b75d216..6544e331e0 100644
24--- a/hw/xen/xen-mapcache.c
25+++ b/hw/xen/xen-mapcache.c
26@@ -620,7 +620,7 @@ static void *xen_map_grant_dyn(MemoryRegion **mr, hwaddr addr, hwaddr *plen,
27 unsigned int i;
28 unsigned int nrefs = (page_off + *plen + XC_PAGE_SIZE - 1) >> XC_PAGE_SHIFT;
29 uint32_t ref = (addr - XEN_GRANT_ADDR_OFF) >> XC_PAGE_SHIFT;
30- uint32_t *refs;
31+ uint32_t *refs = NULL;
32 unsigned int prot = PROT_READ;
33 struct XENMappedGrantRegion *mgr = NULL;
34
35--
362.25.1
37
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0007-xen-skip-ioreq-creation-on-ioreq-registration-failur.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0007-xen-skip-ioreq-creation-on-ioreq-registration-failur.patch
new file mode 100644
index 00000000..83a18c08
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0007-xen-skip-ioreq-creation-on-ioreq-registration-failur.patch
@@ -0,0 +1,42 @@
1From fa475ec44fc78ff246e6536c8b9d408abadbb4a4 Mon Sep 17 00:00:00 2001
2From: Stefano Stabellini <stefano.stabellini@amd.com>
3Date: Fri, 1 Jul 2022 18:50:59 -0700
4Subject: [PATCH 07/16] xen: skip ioreq creation on ioreq registration failure
5
6On ARM it is possible to have a functioning xenpv machine with only the
7PV backends and no IOREQ server. If the IOREQ server creation fails,
8continue to the PV backend initialization.
9
10Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
11---
12 hw/xen/xen-hvm-common.c | 7 ++++++-
13 1 file changed, 6 insertions(+), 1 deletion(-)
14
15diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
16index 67f76f6010..7e7d23397f 100644
17--- a/hw/xen/xen-hvm-common.c
18+++ b/hw/xen/xen-hvm-common.c
19@@ -780,7 +780,11 @@ void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
20 goto err;
21 }
22
23- xen_create_ioreq_server(xen_domid, &state->ioservid);
24+ rc = xen_create_ioreq_server(xen_domid, &state->ioservid);
25+ if (rc) {
26+ DPRINTF("xen: failed to create ioreq server\n");
27+ goto no_ioreq;
28+ }
29
30 state->exit.notify = xen_exit_notifier;
31 qemu_add_exit_notifier(&state->exit);
32@@ -845,6 +849,7 @@ void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
33 QLIST_INIT(&state->dev_list);
34 device_listener_register(&state->device_listener);
35
36+no_ioreq:
37 xen_bus_init();
38
39 /* Initialize backend core & drivers */
40--
412.17.1
42
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0008-accel-xen-xen-all-export-xenstore_record_dm_state.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0008-accel-xen-xen-all-export-xenstore_record_dm_state.patch
new file mode 100644
index 00000000..881076fb
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0008-accel-xen-xen-all-export-xenstore_record_dm_state.patch
@@ -0,0 +1,48 @@
1From 13443fe86bb100849c55b41873f48e0b121c7bc0 Mon Sep 17 00:00:00 2001
2From: Vikram Garhwal <vikram.garhwal@amd.com>
3Date: Fri, 1 Jul 2022 17:28:14 -0700
4Subject: [PATCH 08/16] accel/xen/xen-all: export xenstore_record_dm_state
5
6Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
7Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
8Reviewed-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
9---
10 accel/xen/xen-all.c | 2 +-
11 include/hw/xen/xen.h | 2 ++
12 2 files changed, 3 insertions(+), 1 deletion(-)
13
14diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
15index 69aa7d018b..276625b78b 100644
16--- a/accel/xen/xen-all.c
17+++ b/accel/xen/xen-all.c
18@@ -100,7 +100,7 @@ void xenstore_store_pv_console_info(int i, Chardev *chr)
19 }
20
21
22-static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
23+void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
24 {
25 char path[50];
26
27diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
28index afdf9c436a..31e9538a5c 100644
29--- a/include/hw/xen/xen.h
30+++ b/include/hw/xen/xen.h
31@@ -9,6 +9,7 @@
32 */
33
34 #include "exec/cpu-common.h"
35+#include <xenstore.h>
36
37 /* xen-machine.c */
38 enum xen_mode {
39@@ -31,5 +32,6 @@ qemu_irq *xen_interrupt_controller_init(void);
40 void xenstore_store_pv_console_info(int i, Chardev *chr);
41
42 void xen_register_framebuffer(struct MemoryRegion *mr);
43+void xenstore_record_dm_state(struct xs_handle *xs, const char *state);
44
45 #endif /* QEMU_HW_XEN_H */
46--
472.17.1
48
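The export above exists so that machine code outside accel/xen can tell the Xen toolstack when the device model is up. A hedged usage sketch follows; the xenpv machine patch later in this series instead passes the global xenstore handle from the legacy backend, and the wrapper function name here is made up for illustration.

/* Illustrative only: announce the device model as running via xenstore. */
#include "qemu/osdep.h"
#include "hw/xen/xen.h"

static void example_announce_running(void)
{
    struct xs_handle *xsh = xs_daemon_open();

    if (!xsh) {
        return;
    }
    /* Records the device-model state under device-model/<domid>/state. */
    xenstore_record_dm_state(xsh, "running");
    xs_daemon_close(xsh);
}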
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0008-hw-arm-Add-grant-mapping.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0008-hw-arm-Add-grant-mapping.patch
new file mode 100644
index 00000000..3b83d229
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0008-hw-arm-Add-grant-mapping.patch
@@ -0,0 +1,39 @@
1From b5e5f60de37bb6f71bc34ecb989c31ef5c834272 Mon Sep 17 00:00:00 2001
2From: Vikram Garhwal <vikram.garhwal@amd.com>
3Date: Tue, 31 Jan 2023 21:46:43 +0000
4Subject: [PATCH 8/8] hw: arm: Add grant mapping.
5
6Add support for grant mapping and change the qemu machine name to xenpvh.
7
8Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
9Acked-by: Stefano Stabellini <stefano.stabellini@amd.com>
10---
11 hw/arm/xen_arm.c | 5 ++++-
12 1 file changed, 4 insertions(+), 1 deletion(-)
13
14diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c
15index 4ac425a3c5..392bed7367 100644
16--- a/hw/arm/xen_arm.c
17+++ b/hw/arm/xen_arm.c
18@@ -35,7 +35,7 @@
19 #include "sysemu/tpm.h"
20 #include "hw/xen/arch_hvm.h"
21
22-#define TYPE_XEN_ARM MACHINE_TYPE_NAME("xenpv")
23+#define TYPE_XEN_ARM MACHINE_TYPE_NAME("xenpvh")
24 OBJECT_DECLARE_SIMPLE_TYPE(XenArmState, XEN_ARM)
25
26 static MemoryListener xen_memory_listener = {
27@@ -115,6 +115,9 @@ static void xen_init_ram(MachineState *machine)
28 DPRINTF("Initialized region xen.ram.hi: base 0x%llx size 0x%lx\n",
29 GUEST_RAM1_BASE, ram_size[1]);
30 }
31+
32+ DPRINTF("init grant ram mapping for XEN\n");
33+ ram_grants = *xen_init_grant_ram();
34 }
35
36 void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
37--
382.25.1
39
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0009-xen-hvm-enable-xen-hvm-common-build-for-ARM.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0009-xen-hvm-enable-xen-hvm-common-build-for-ARM.patch
new file mode 100644
index 00000000..1b1aea76
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0009-xen-hvm-enable-xen-hvm-common-build-for-ARM.patch
@@ -0,0 +1,43 @@
1From 2e6a9f464fd1f247c41ce3666ff3e3f66920d0b7 Mon Sep 17 00:00:00 2001
2From: Vikram Garhwal <vikram.garhwal@amd.com>
3Date: Fri, 1 Jul 2022 17:28:15 -0700
4Subject: [PATCH 09/16] xen-hvm: enable xen-hvm-common build for ARM
5
6Add CONFIG_XEN for the aarch64 target and change xen-hvm-common.c to
7support building for ARM targets.
8
9Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
10Acked-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
11Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
12---
13 hw/arm/meson.build | 1 +
14 meson.build | 2 +-
15 2 files changed, 2 insertions(+), 1 deletion(-)
16
17diff --git a/hw/arm/meson.build b/hw/arm/meson.build
18index 92f9f6e000..3aac913bfd 100644
19--- a/hw/arm/meson.build
20+++ b/hw/arm/meson.build
21@@ -62,5 +62,6 @@ arm_ss.add(when: 'CONFIG_FSL_IMX7', if_true: files('fsl-imx7.c', 'mcimx7d-sabre.
22 arm_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c', 'smmuv3.c'))
23 arm_ss.add(when: 'CONFIG_FSL_IMX6UL', if_true: files('fsl-imx6ul.c', 'mcimx6ul-evk.c'))
24 arm_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_soc.c'))
25+arm_ss.add_all(xen_ss)
26
27 hw_arch += {'arm': arm_ss}
28diff --git a/meson.build b/meson.build
29index 5c6b5a1c75..b94f0cd76e 100644
30--- a/meson.build
31+++ b/meson.build
32@@ -125,7 +125,7 @@ endif
33 if cpu in ['x86', 'x86_64', 'arm', 'aarch64']
34 # i386 emulator provides xenpv machine type for multiple architectures
35 accelerator_targets += {
36- 'CONFIG_XEN': ['i386-softmmu', 'x86_64-softmmu'],
37+ 'CONFIG_XEN': ['i386-softmmu', 'x86_64-softmmu', 'aarch64-softmmu'],
38 }
39 endif
40 if cpu in ['x86', 'x86_64']
41--
422.17.1
43
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0010-hw-arm-introduce-xenpv-machine.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0010-hw-arm-introduce-xenpv-machine.patch
new file mode 100644
index 00000000..fc979b52
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0010-hw-arm-introduce-xenpv-machine.patch
@@ -0,0 +1,230 @@
1From 5618a18b1f12d567a8ef85240d55b841e18ef472 Mon Sep 17 00:00:00 2001
2From: Vikram Garhwal <vikram.garhwal@amd.com>
3Date: Fri, 1 Jul 2022 17:28:16 -0700
4Subject: [PATCH 10/16] hw/arm: introduce xenpv machine
5
6Create a new machine xenpv which creates an IOREQ server to connect
7with Xen. It also creates a tpm-tis-device which connects to swtpm to
8support TPM functionalities.
9
10The Xen IOREQ connection expects TARGET_PAGE_SIZE to be 4096, and the xenpv
11machine on ARM will have no CPU definitions. We need to define
12TARGET_PAGE_SIZE appropriately ourselves.
13
14Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
15Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
16Reviewed-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
17---
18 hw/arm/meson.build | 1 +
19 hw/arm/xen_arm.c | 156 ++++++++++++++++++++++++++++++++++
20 include/hw/arm/xen_arch_hvm.h | 12 +++
21 include/hw/xen/arch_hvm.h | 2 +
22 4 files changed, 171 insertions(+)
23 create mode 100644 hw/arm/xen_arm.c
24 create mode 100644 include/hw/arm/xen_arch_hvm.h
25
26diff --git a/hw/arm/meson.build b/hw/arm/meson.build
27index 3aac913bfd..0cae024374 100644
28--- a/hw/arm/meson.build
29+++ b/hw/arm/meson.build
30@@ -62,6 +62,7 @@ arm_ss.add(when: 'CONFIG_FSL_IMX7', if_true: files('fsl-imx7.c', 'mcimx7d-sabre.
31 arm_ss.add(when: 'CONFIG_ARM_SMMUV3', if_true: files('smmu-common.c', 'smmuv3.c'))
32 arm_ss.add(when: 'CONFIG_FSL_IMX6UL', if_true: files('fsl-imx6ul.c', 'mcimx6ul-evk.c'))
33 arm_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_soc.c'))
34+arm_ss.add(when: 'CONFIG_XEN', if_true: files('xen_arm.c'))
35 arm_ss.add_all(xen_ss)
36
37 hw_arch += {'arm': arm_ss}
38diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c
39new file mode 100644
40index 0000000000..0922e3db84
41--- /dev/null
42+++ b/hw/arm/xen_arm.c
43@@ -0,0 +1,156 @@
44+/*
45+ * QEMU ARM Xen PV Machine
46+ *
47+ *
48+ * Permission is hereby granted, free of charge, to any person obtaining a copy
49+ * of this software and associated documentation files (the "Software"), to deal
50+ * in the Software without restriction, including without limitation the rights
51+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
52+ * copies of the Software, and to permit persons to whom the Software is
53+ * furnished to do so, subject to the following conditions:
54+ *
55+ * The above copyright notice and this permission notice shall be included in
56+ * all copies or substantial portions of the Software.
57+ *
58+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
59+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
60+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
61+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
62+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
63+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
64+ * THE SOFTWARE.
65+ */
66+
67+#include "qemu/osdep.h"
68+#include "qemu/error-report.h"
69+#include "qapi/qapi-commands-migration.h"
70+#include "hw/boards.h"
71+#include "hw/sysbus.h"
72+#include "sysemu/block-backend.h"
73+#include "sysemu/tpm_backend.h"
74+#include "sysemu/sysemu.h"
75+#include "hw/xen/xen-legacy-backend.h"
76+#include "hw/xen/xen-hvm-common.h"
77+#include "sysemu/tpm.h"
78+#include "hw/xen/arch_hvm.h"
79+
80+#define TYPE_XEN_ARM MACHINE_TYPE_NAME("xenpv")
81+OBJECT_DECLARE_SIMPLE_TYPE(XenArmState, XEN_ARM)
82+
83+static MemoryListener xen_memory_listener = {
84+ .region_add = xen_region_add,
85+ .region_del = xen_region_del,
86+ .log_start = NULL,
87+ .log_stop = NULL,
88+ .log_sync = NULL,
89+ .log_global_start = NULL,
90+ .log_global_stop = NULL,
91+ .priority = 10,
92+};
93+
94+struct XenArmState {
95+ /*< private >*/
96+ MachineState parent;
97+
98+ XenIOState *state;
99+};
100+
101+void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
102+{
103+ hw_error("Invalid ioreq type 0x%x\n", req->type);
104+
105+ return;
106+}
107+
108+void arch_xen_set_memory(XenIOState *state,MemoryRegionSection *section,
109+ bool add)
110+{
111+}
112+
113+void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
114+{
115+}
116+
117+void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
118+{
119+}
120+
121+static int xen_init_ioreq(XenIOState *state, unsigned int max_cpus)
122+{
123+ xen_dmod = xendevicemodel_open(0, 0);
124+ xen_xc = xc_interface_open(0, 0, 0);
125+
126+ if (xen_xc == NULL) {
127+ perror("xen: can't open xen interface\n");
128+ return -1;
129+ }
130+
131+ xen_fmem = xenforeignmemory_open(0, 0);
132+ if (xen_fmem == NULL) {
133+ perror("xen: can't open xen fmem interface\n");
134+ xc_interface_close(xen_xc);
135+ return -1;
136+ }
137+
138+ xen_register_ioreq(state, max_cpus, xen_memory_listener);
139+
140+ xenstore_record_dm_state(xenstore, "running");
141+
142+ return 0;
143+}
144+
145+
146+static void xen_arm_init(MachineState *machine)
147+{
148+ DeviceState *dev;
149+ SysBusDevice *busdev;
150+ Error *errp = NULL;
151+ XenArmState *xam = XEN_ARM(machine);
152+
153+ xam->state = g_new0(XenIOState, 1);
154+
155+ if (xen_init_ioreq(xam->state, machine->smp.cpus)) {
156+ return;
157+ }
158+
159+ TPMBackend *be = qemu_find_tpm_be("tpm0");
160+ if (be == NULL) {
161+ DPRINTF("Couldn't fine the backend for tpm0\n");
162+ return;
163+ }
164+
165+ dev = qdev_new(TYPE_TPM_TIS_SYSBUS);
166+ object_property_set_link(OBJECT(dev), "tpmdev", OBJECT(be), &errp);
167+ object_property_set_str(OBJECT(dev), "tpmdev", be->id, &errp);
168+ busdev = SYS_BUS_DEVICE(dev);
169+ sysbus_realize_and_unref(busdev, &error_fatal);
170+ sysbus_mmio_map(busdev, 0, GUEST_TPM_BASE);
171+
172+ DPRINTF("Connected tpmdev at address 0x%lx\n", GUEST_TPM_BASE);
173+
174+ return;
175+}
176+
177+static void xen_arm_machine_class_init(ObjectClass *oc, void *data)
178+{
179+
180+ MachineClass *mc = MACHINE_CLASS(oc);
181+ mc->desc = "Xen Para-virtualized PC";
182+ mc->init = xen_arm_init;
183+ mc->max_cpus = 1;
184+ machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
185+}
186+
187+static const TypeInfo xen_arm_machine_type = {
188+ .name = TYPE_XEN_ARM,
189+ .parent = TYPE_MACHINE,
190+ .class_init = xen_arm_machine_class_init,
191+ .instance_size = sizeof(XenArmState),
192+};
193+
194+static void xen_arm_machine_register_types(void)
195+{
196+ type_register_static(&xen_arm_machine_type);
197+}
198+
199+type_init(xen_arm_machine_register_types)
200diff --git a/include/hw/arm/xen_arch_hvm.h b/include/hw/arm/xen_arch_hvm.h
201new file mode 100644
202index 0000000000..f645dfec28
203--- /dev/null
204+++ b/include/hw/arm/xen_arch_hvm.h
205@@ -0,0 +1,12 @@
206+#ifndef HW_XEN_ARCH_ARM_HVM_H
207+#define HW_XEN_ARCH_ARM_HVM_H
208+
209+#include <xen/hvm/ioreq.h>
210+void arch_handle_ioreq(XenIOState *state, ioreq_t *req);
211+void arch_xen_set_memory(XenIOState *state,
212+ MemoryRegionSection *section,
213+ bool add);
214+
215+#undef TARGET_PAGE_SIZE
216+#define TARGET_PAGE_SIZE 4096
217+#endif
218diff --git a/include/hw/xen/arch_hvm.h b/include/hw/xen/arch_hvm.h
219index 26674648d8..c7c515220d 100644
220--- a/include/hw/xen/arch_hvm.h
221+++ b/include/hw/xen/arch_hvm.h
222@@ -1,3 +1,5 @@
223 #if defined(TARGET_I386) || defined(TARGET_X86_64)
224 #include "hw/i386/xen_arch_hvm.h"
225+#elif defined(TARGET_ARM) || defined(TARGET_ARM_64)
226+#include "hw/arm/xen_arch_hvm.h"
227 #endif
228--
2292.17.1
230
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0011-meson.build-do-not-set-have_xen_pci_passthrough-for-.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0011-meson.build-do-not-set-have_xen_pci_passthrough-for-.patch
new file mode 100644
index 00000000..dad3029f
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0011-meson.build-do-not-set-have_xen_pci_passthrough-for-.patch
@@ -0,0 +1,33 @@
1From f4ff3490639dea08fb70ec69d60fe73ef479073b Mon Sep 17 00:00:00 2001
2From: Stefano Stabellini <stefano.stabellini@amd.com>
3Date: Thu, 7 Jul 2022 14:03:41 -0700
4Subject: [PATCH 11/16] meson.build: do not set have_xen_pci_passthrough for
5 aarch64 targets
6MIME-Version: 1.0
7Content-Type: text/plain; charset=UTF-8
8Content-Transfer-Encoding: 8bit
9
10have_xen_pci_passthrough is only used for Xen x86 VMs.
11
12Signed-off-by: Stefano Stabellini <stefano.stabellini@amd.com>
13Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
14---
15 meson.build | 2 ++
16 1 file changed, 2 insertions(+)
17
18diff --git a/meson.build b/meson.build
19index b94f0cd76e..a4965251ab 100644
20--- a/meson.build
21+++ b/meson.build
22@@ -1469,6 +1469,8 @@ have_xen_pci_passthrough = get_option('xen_pci_passthrough') \
23 error_message: 'Xen PCI passthrough requested but Xen not enabled') \
24 .require(targetos == 'linux',
25 error_message: 'Xen PCI passthrough not available on this platform') \
26+ .require(cpu == 'x86' or cpu == 'x86_64',
27+ error_message: 'Xen PCI passthrough not available on this platform') \
28 .allowed()
29
30
31--
322.17.1
33
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0012-xen-arm-call-qemu_find_tpm_be-if-CONFIG_TPM.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0012-xen-arm-call-qemu_find_tpm_be-if-CONFIG_TPM.patch
new file mode 100644
index 00000000..f80a0873
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0012-xen-arm-call-qemu_find_tpm_be-if-CONFIG_TPM.patch
@@ -0,0 +1,72 @@
1From a26982a55fa5f47116b344ca5d411f00c3a2b422 Mon Sep 17 00:00:00 2001
2From: Stefano Stabellini <stefano.stabellini@amd.com>
3Date: Thu, 7 Jul 2022 14:35:33 -0700
4Subject: [PATCH 12/16] xen-arm: call qemu_find_tpm_be if CONFIG_TPM
5
6qemu_find_tpm_be is only available when CONFIG_TPM is enabled.
7So #ifdef the call to make sure the code builds correctly even when
8CONFIG_TPM is not enabled.
9
10Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
11---
12 hw/arm/xen_arm.c | 28 +++++++++++++++++-----------
13 1 file changed, 17 insertions(+), 11 deletions(-)
14
15diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c
16index 0922e3db84..f248b5744a 100644
17--- a/hw/arm/xen_arm.c
18+++ b/hw/arm/xen_arm.c
19@@ -99,26 +99,18 @@ static int xen_init_ioreq(XenIOState *state, unsigned int max_cpus)
20 return 0;
21 }
22
23-
24-static void xen_arm_init(MachineState *machine)
25+static void xen_enable_tpm(void)
26 {
27+#ifdef CONFIG_TPM
28+ Error *errp = NULL;
29 DeviceState *dev;
30 SysBusDevice *busdev;
31- Error *errp = NULL;
32- XenArmState *xam = XEN_ARM(machine);
33-
34- xam->state = g_new0(XenIOState, 1);
35-
36- if (xen_init_ioreq(xam->state, machine->smp.cpus)) {
37- return;
38- }
39
40 TPMBackend *be = qemu_find_tpm_be("tpm0");
41 if (be == NULL) {
42 DPRINTF("Couldn't fine the backend for tpm0\n");
43 return;
44 }
45-
46 dev = qdev_new(TYPE_TPM_TIS_SYSBUS);
47 object_property_set_link(OBJECT(dev), "tpmdev", OBJECT(be), &errp);
48 object_property_set_str(OBJECT(dev), "tpmdev", be->id, &errp);
49@@ -127,6 +119,20 @@ static void xen_arm_init(MachineState *machine)
50 sysbus_mmio_map(busdev, 0, GUEST_TPM_BASE);
51
52 DPRINTF("Connected tpmdev at address 0x%lx\n", GUEST_TPM_BASE);
53+#endif
54+}
55+
56+static void xen_arm_init(MachineState *machine)
57+{
58+ XenArmState *xam = XEN_ARM(machine);
59+
60+ xam->state = g_new0(XenIOState, 1);
61+
62+ if (xen_init_ioreq(xam->state, machine->smp.cpus)) {
63+ return;
64+ }
65+
66+ xen_enable_tpm();
67
68 return;
69 }
70--
712.17.1
72
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0013-arm-xenpv-fix-TPM-address-print-warning.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0013-arm-xenpv-fix-TPM-address-print-warning.patch
new file mode 100644
index 00000000..1aa09efb
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0013-arm-xenpv-fix-TPM-address-print-warning.patch
@@ -0,0 +1,27 @@
1From c5b128668d9cd1e1cb4da80d5bc8aaebc6ff2e19 Mon Sep 17 00:00:00 2001
2From: Vikram Garhwal <vikram.garhwal@amd.com>
3Date: Fri, 23 Dec 2022 00:06:29 +0000
4Subject: [PATCH 13/16] arm: xenpv: fix TPM address print warning
5
6Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
7Acked-by: Stefano Stabellini <stefano.stabellini@amd.com>
8---
9 hw/arm/xen_arm.c | 2 +-
10 1 file changed, 1 insertion(+), 1 deletion(-)
11
12diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c
13index f248b5744a..153cedfeb4 100644
14--- a/hw/arm/xen_arm.c
15+++ b/hw/arm/xen_arm.c
16@@ -118,7 +118,7 @@ static void xen_enable_tpm(void)
17 sysbus_realize_and_unref(busdev, &error_fatal);
18 sysbus_mmio_map(busdev, 0, GUEST_TPM_BASE);
19
20- DPRINTF("Connected tpmdev at address 0x%lx\n", GUEST_TPM_BASE);
21+ DPRINTF("Connected tpmdev at address 0x%llx\n", GUEST_TPM_BASE);
22 #endif
23 }
24
25--
262.17.1
27
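The warning being fixed appears to come from GUEST_TPM_BASE being defined as an unsigned long long constant in Xen's public headers, which "%lx" does not match on every host. As a hedged aside, the other common way to spell this is a fixed-width format macro; a tiny standalone sketch of that pattern follows, with a made-up address value. The patch above simply switches to "%llx", which also silences the warning.

/* Sketch of the fixed-width-format alternative, not what the patch does. */
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
    uint64_t tpm_base = 0x0c000000ULL;   /* illustrative value only */

    printf("Connected tpmdev at address 0x%" PRIx64 "\n", tpm_base);
    return 0;
}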
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0014-xen_arm-Create-virtio-mmio-devices-during-initializa.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0014-xen_arm-Create-virtio-mmio-devices-during-initializa.patch
new file mode 100644
index 00000000..a6925acf
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0014-xen_arm-Create-virtio-mmio-devices-during-initializa.patch
@@ -0,0 +1,83 @@
1From 3dc39d71c3652bea37dc955d5dbf8cd391d2aed0 Mon Sep 17 00:00:00 2001
2From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
3Date: Sat, 30 Jul 2022 17:51:19 +0300
4Subject: [PATCH 14/16] xen_arm: Create virtio-mmio devices during
5 initialization
6
7In order to use virtio backends we need to allocate virtio-mmio
8parameters (irq and base) and register corresponding buses.
9
10Use the constants defined in the public header arch-arm.h to be
11aligned with the toolstack. So the number of currently supported
12virtio-mmio devices is 10.
13
14For triggering the interrupts, use the device-model hypercall that
15already exists on Arm.
16
17The toolstack should then insert the same number of device nodes
18into the guest device-tree.
19
20Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
21Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
22Reviewed-by: Stefano Stabellini <stefano.stabellini@amd.com>
23---
24 hw/arm/xen_arm.c | 29 +++++++++++++++++++++++++++++
25 1 file changed, 29 insertions(+)
26
27diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c
28index 153cedfeb4..2012ee7aff 100644
29--- a/hw/arm/xen_arm.c
30+++ b/hw/arm/xen_arm.c
31@@ -25,6 +25,7 @@
32 #include "qemu/error-report.h"
33 #include "qapi/qapi-commands-migration.h"
34 #include "hw/boards.h"
35+#include "hw/irq.h"
36 #include "hw/sysbus.h"
37 #include "sysemu/block-backend.h"
38 #include "sysemu/tpm_backend.h"
39@@ -55,6 +56,32 @@ struct XenArmState {
40 XenIOState *state;
41 };
42
43+#define VIRTIO_MMIO_DEV_SIZE 0x200
44+
45+#define NR_VIRTIO_MMIO_DEVICES \
46+ (GUEST_VIRTIO_MMIO_SPI_LAST - GUEST_VIRTIO_MMIO_SPI_FIRST)
47+
48+static void xen_set_irq(void *opaque, int irq, int level)
49+{
50+ xendevicemodel_set_irq_level(xen_dmod, xen_domid, irq, level);
51+}
52+
53+static void xen_create_virtio_mmio_devices(XenArmState *xam)
54+{
55+ int i;
56+
57+ for (i = 0; i < NR_VIRTIO_MMIO_DEVICES; i++) {
58+ hwaddr base = GUEST_VIRTIO_MMIO_BASE + i * VIRTIO_MMIO_DEV_SIZE;
59+ qemu_irq irq = qemu_allocate_irq(xen_set_irq, NULL,
60+ GUEST_VIRTIO_MMIO_SPI_FIRST + i);
61+
62+ sysbus_create_simple("virtio-mmio", base, irq);
63+
64+ DPRINTF("Created virtio-mmio device %d: irq %d base 0x%lx\n",
65+ i, GUEST_VIRTIO_MMIO_SPI_FIRST + i, base);
66+ }
67+}
68+
69 void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
70 {
71 hw_error("Invalid ioreq type 0x%x\n", req->type);
72@@ -132,6 +159,8 @@ static void xen_arm_init(MachineState *machine)
73 return;
74 }
75
76+ xen_create_virtio_mmio_devices(xam);
77+
78 xen_enable_tpm();
79
80 return;
81--
822.17.1
83
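To make the expected guest device-tree contents concrete, a worked example of the register/interrupt layout the loop above produces. The base address and SPI number below are assumptions for illustration only; the real values are the GUEST_VIRTIO_MMIO_* constants from Xen's public arch-arm.h.

/* Worked example of the layout created by xen_create_virtio_mmio_devices().
 * EXAMPLE_* constants are assumed values, not the real Xen definitions. */
#include <stdio.h>

#define VIRTIO_MMIO_DEV_SIZE            0x200
#define EXAMPLE_VIRTIO_MMIO_BASE        0x02000000UL  /* assumed */
#define EXAMPLE_VIRTIO_MMIO_SPI_FIRST   33            /* assumed */
#define NR_DEVICES                      10

int main(void)
{
    for (int i = 0; i < NR_DEVICES; i++) {
        unsigned long base = EXAMPLE_VIRTIO_MMIO_BASE +
                             (unsigned long)i * VIRTIO_MMIO_DEV_SIZE;
        int irq = EXAMPLE_VIRTIO_MMIO_SPI_FIRST + i;

        /* Each line corresponds to one virtio,mmio node the toolstack
         * must generate in the guest device-tree. */
        printf("virtio-mmio %d: base 0x%lx, SPI %d\n", i, base, irq);
    }
    return 0;
}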
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0015-xen_arm-Initialize-RAM-and-add-hi-low-memory-regions.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0015-xen_arm-Initialize-RAM-and-add-hi-low-memory-regions.patch
new file mode 100644
index 00000000..7c2b272d
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0015-xen_arm-Initialize-RAM-and-add-hi-low-memory-regions.patch
@@ -0,0 +1,105 @@
1From a284a53c5374e19ac37b884f2dd50293e7c8070e Mon Sep 17 00:00:00 2001
2From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
3Date: Sat, 30 Jul 2022 17:18:06 +0300
4Subject: [PATCH 15/16] xen_arm: Initialize RAM and add hi/low memory regions
5
6In order to use virtio backends we need to initialize RAM for the
7xen-mapcache (which is responsible for mapping guest memory using foreign
8mapping) to work. Calculate and add hi/low memory regions based on
9machine->ram_size.
10
11Use the constants defined in the public header arch-arm.h to be aligned with the
12toolstack.
13
14The toolstack should then pass the real ram_size using the "-m" arg.
15If "-m" is not given, create a QEMU machine without IOREQ, TPM and VIRTIO to
16keep it usable for /etc/init.d/xencommons.
17
18Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
19Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
20Reviewed-by: Stefano Stabellini <stefano.stabellini@amd.com>
21---
22 hw/arm/xen_arm.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
23 1 file changed, 46 insertions(+)
24
25diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c
26index 2012ee7aff..fde919df29 100644
27--- a/hw/arm/xen_arm.c
28+++ b/hw/arm/xen_arm.c
29@@ -56,6 +56,8 @@ struct XenArmState {
30 XenIOState *state;
31 };
32
33+static MemoryRegion ram_lo, ram_hi;
34+
35 #define VIRTIO_MMIO_DEV_SIZE 0x200
36
37 #define NR_VIRTIO_MMIO_DEVICES \
38@@ -82,6 +84,39 @@ static void xen_create_virtio_mmio_devices(XenArmState *xam)
39 }
40 }
41
42+static void xen_init_ram(MachineState *machine)
43+{
44+ MemoryRegion *sysmem = get_system_memory();
45+ ram_addr_t block_len, ram_size[GUEST_RAM_BANKS];
46+
47+ if (machine->ram_size <= GUEST_RAM0_SIZE) {
48+ ram_size[0] = machine->ram_size;
49+ ram_size[1] = 0;
50+ block_len = GUEST_RAM0_BASE + ram_size[0];
51+ } else {
52+ ram_size[0] = GUEST_RAM0_SIZE;
53+ ram_size[1] = machine->ram_size - GUEST_RAM0_SIZE;
54+ block_len = GUEST_RAM1_BASE + ram_size[1];
55+ }
56+
57+ memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
58+ &error_fatal);
59+
60+ memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", &ram_memory,
61+ GUEST_RAM0_BASE, ram_size[0]);
62+ memory_region_add_subregion(sysmem, GUEST_RAM0_BASE, &ram_lo);
63+ DPRINTF("Initialized region xen.ram.lo: base 0x%llx size 0x%lx\n",
64+ GUEST_RAM0_BASE, ram_size[0]);
65+
66+ if (ram_size[1] > 0) {
67+ memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", &ram_memory,
68+ GUEST_RAM1_BASE, ram_size[1]);
69+ memory_region_add_subregion(sysmem, GUEST_RAM1_BASE, &ram_hi);
70+ DPRINTF("Initialized region xen.ram.hi: base 0x%llx size 0x%lx\n",
71+ GUEST_RAM1_BASE, ram_size[1]);
72+ }
73+}
74+
75 void arch_handle_ioreq(XenIOState *state, ioreq_t *req)
76 {
77 hw_error("Invalid ioreq type 0x%x\n", req->type);
78@@ -155,6 +190,14 @@ static void xen_arm_init(MachineState *machine)
79
80 xam->state = g_new0(XenIOState, 1);
81
82+ if (machine->ram_size == 0) {
83+ DPRINTF("ram_size not specified. QEMU machine will be started without"
84+ " TPM, IOREQ and Virtio-MMIO backends\n");
85+ return;
86+ }
87+
88+ xen_init_ram(machine);
89+
90 if (xen_init_ioreq(xam->state, machine->smp.cpus)) {
91 return;
92 }
93@@ -173,6 +216,9 @@ static void xen_arm_machine_class_init(ObjectClass *oc, void *data)
94 mc->desc = "Xen Para-virtualized PC";
95 mc->init = xen_arm_init;
96 mc->max_cpus = 1;
97+ /* Set explicitly here to make sure that real ram_size is passed */
98+ mc->default_ram_size = 0;
99+
100 machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
101 }
102
103--
1042.17.1
105
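A worked example of the bank split performed by xen_init_ram() above, for a machine started with 4 GiB. The bank bases and the 3 GiB low-bank size are assumptions for illustration; the real values are the GUEST_RAM0_*/GUEST_RAM1_* constants from Xen's public arch-arm.h.

/* Worked example of the hi/low RAM bank split; EXAMPLE_* values assumed. */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_RAM0_BASE 0x40000000ULL      /* assumed low bank base */
#define EXAMPLE_RAM0_SIZE (3ULL << 30)       /* assumed 3 GiB low bank */
#define EXAMPLE_RAM1_BASE 0x200000000ULL     /* assumed high bank base */

int main(void)
{
    uint64_t ram_size = 4ULL << 30;          /* e.g. started with -m 4096 */
    uint64_t bank0, bank1;

    if (ram_size <= EXAMPLE_RAM0_SIZE) {
        bank0 = ram_size;
        bank1 = 0;
    } else {
        bank0 = EXAMPLE_RAM0_SIZE;
        bank1 = ram_size - EXAMPLE_RAM0_SIZE;
    }

    printf("xen.ram.lo: base 0x%llx size 0x%llx\n",
           (unsigned long long)EXAMPLE_RAM0_BASE, (unsigned long long)bank0);
    if (bank1) {
        printf("xen.ram.hi: base 0x%llx size 0x%llx\n",
               (unsigned long long)EXAMPLE_RAM1_BASE, (unsigned long long)bank1);
    }
    return 0;
}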
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0016-xen_arm-Add-accel-xen-and-drop-extra-interface-openi.patch b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0016-xen_arm-Add-accel-xen-and-drop-extra-interface-openi.patch
new file mode 100644
index 00000000..14f2e240
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu/0016-xen_arm-Add-accel-xen-and-drop-extra-interface-openi.patch
@@ -0,0 +1,79 @@
1From a730d5ea4a0445a8c694b56583dd06bd000fae74 Mon Sep 17 00:00:00 2001
2From: Vikram Garhwal <vikram.garhwal@amd.com>
3Date: Wed, 4 Jan 2023 23:05:25 +0000
4Subject: [PATCH 16/16] xen_arm: Add "accel = xen" and drop extra interface
5 openings
6
7In order to use virtio backends we need to make sure that the Xen accelerator
8is enabled (xen_enabled() returns true) as the memory/cache systems
9check for xen_enabled() to perform specific actions. Without that
10the xen-mapcache (which is needed for mapping guest memory) is not in use.
11
12Also drop the extra interface opening, as this is already done in xen-all.c
13(so drop xen_init_ioreq() completely) and skip virtio/tpm initialization
14if device emulation is not available.
15
16Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
17Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
18Reviewed-by: Stefano Stabellini <stefano.stabellini@amd.com>
19---
20 hw/arm/xen_arm.c | 29 ++---------------------------
21 1 file changed, 2 insertions(+), 27 deletions(-)
22
23diff --git a/hw/arm/xen_arm.c b/hw/arm/xen_arm.c
24index fde919df29..4ac425a3c5 100644
25--- a/hw/arm/xen_arm.c
26+++ b/hw/arm/xen_arm.c
27@@ -137,30 +137,6 @@ void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
28 {
29 }
30
31-static int xen_init_ioreq(XenIOState *state, unsigned int max_cpus)
32-{
33- xen_dmod = xendevicemodel_open(0, 0);
34- xen_xc = xc_interface_open(0, 0, 0);
35-
36- if (xen_xc == NULL) {
37- perror("xen: can't open xen interface\n");
38- return -1;
39- }
40-
41- xen_fmem = xenforeignmemory_open(0, 0);
42- if (xen_fmem == NULL) {
43- perror("xen: can't open xen fmem interface\n");
44- xc_interface_close(xen_xc);
45- return -1;
46- }
47-
48- xen_register_ioreq(state, max_cpus, xen_memory_listener);
49-
50- xenstore_record_dm_state(xenstore, "running");
51-
52- return 0;
53-}
54-
55 static void xen_enable_tpm(void)
56 {
57 #ifdef CONFIG_TPM
58@@ -198,9 +174,7 @@ static void xen_arm_init(MachineState *machine)
59
60 xen_init_ram(machine);
61
62- if (xen_init_ioreq(xam->state, machine->smp.cpus)) {
63- return;
64- }
65+ xen_register_ioreq(xam->state, machine->smp.cpus, xen_memory_listener);
66
67 xen_create_virtio_mmio_devices(xam);
68
69@@ -218,6 +192,7 @@ static void xen_arm_machine_class_init(ObjectClass *oc, void *data)
70 mc->max_cpus = 1;
71 /* Set explicitly here to make sure that real ram_size is passed */
72 mc->default_ram_size = 0;
73+ mc->default_machine_opts = "accel=xen";
74
75 machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
76 }
77--
782.17.1
79
diff --git a/meta-xilinx-virtualization/recipes-devtools/qemu/qemu_%.bbappend b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu_%.bbappend
new file mode 100644
index 00000000..3e93710c
--- /dev/null
+++ b/meta-xilinx-virtualization/recipes-devtools/qemu/qemu_%.bbappend
@@ -0,0 +1,5 @@
1require qemu-tpm.inc
2require qemu-xen.inc
3
4# We do not want QEMU, on the target to be configured with OpenGL
5PACKAGECONFIG:remove:class-target:petalinux = "virglrenderer epoxy gtk+"