summaryrefslogtreecommitdiffstats
path: root/recipes-core/vxn/vxn_1.0.bb
diff options
context:
space:
mode:
authorBruce Ashfield <bruce.ashfield@gmail.com>2026-02-15 04:35:55 +0000
committerBruce Ashfield <bruce.ashfield@gmail.com>2026-02-26 01:05:01 +0000
commit57d267db7878180d1ecd1936df5284550d0031c3 (patch)
treeb2205ccb2e6114fdda4384518d4b9047209756ef /recipes-core/vxn/vxn_1.0.bb
parent0fe8c4444f3199b862a4ba52b2b62b5f9b2af85f (diff)
downloadmeta-virtualization-57d267db7878180d1ecd1936df5284550d0031c3.tar.gz
vxn: add Xen DomU container runtime with OCI image support
vxn runs OCI containers as Xen DomU guests — the VM IS the container. No Docker/containerd runs inside the guest; the init script directly mounts the container rootfs and execs the entrypoint via chroot. Host-side (Dom0): - vxn.sh: Docker-like CLI wrapper (sets HYPERVISOR=xen) - vrunner-backend-xen.sh: Xen xl backend for vrunner - hv_prepare_container(): pulls OCI images via skopeo, resolves entrypoint from OCI config using jq on host - xl create for VM lifecycle (PVH on aarch64, PV on x86_64) - Bridge networking with iptables DNAT for port forwards - Console capture via xl console for ephemeral mode Guest-side (DomU): - vxn-init.sh: mounts container rootfs from input disk, extracts OCI layers, execs entrypoint via chroot - Supports containers with or without /bin/sh - grep/sed fallback for OCI config parsing (no jq needed) - Daemon mode with command loop on hvc1 - vcontainer-init-common.sh: hypervisor detection, head -n fix - vcontainer-preinit.sh: init selection via vcontainer.init= Build system: - vxn-initramfs-create.inc: assembles boot blobs from vruntime multiconfig, injects vxn-init.sh into rootfs squashfs - vxn_1.0.bb: Dom0 package with scripts + blobs - nostamp on install/package chain (blobs from DEPLOY_DIR are untracked by sstate) - vxn.cfg: Xen PV kernel config fragment Tested: vxn -it --no-daemon run --rm hello-world Signed-off-by: Bruce Ashfield <bruce.ashfield@gmail.com>
Diffstat (limited to 'recipes-core/vxn/vxn_1.0.bb')
-rw-r--r--recipes-core/vxn/vxn_1.0.bb167
1 file changed, 167 insertions, 0 deletions
diff --git a/recipes-core/vxn/vxn_1.0.bb b/recipes-core/vxn/vxn_1.0.bb
new file mode 100644
index 00000000..2a36274a
--- /dev/null
+++ b/recipes-core/vxn/vxn_1.0.bb
@@ -0,0 +1,167 @@
1# SPDX-FileCopyrightText: Copyright (C) 2025 Bruce Ashfield
2#
3# SPDX-License-Identifier: MIT
4#
5# vxn_1.0.bb
6# ===========================================================================
7# Target integration package for vxn (vcontainer on Xen)
8# ===========================================================================
9#
10# This recipe installs vxn onto a Xen Dom0 target. It provides:
11# - vxn CLI wrapper (docker-like interface for Xen DomU containers)
12# - vrunner.sh (hypervisor-agnostic VM runner)
13# - vrunner-backend-xen.sh (Xen xl backend)
14# - vcontainer-common.sh (shared CLI code)
15# - Kernel, initramfs, and rootfs blobs for booting DomU guests
16#
17# The blobs are sourced from the vxn-initramfs-create recipe which
18# reuses the same rootfs images built by vdkr/vpdmn (the init scripts
19# detect the hypervisor at boot time).
20#
21# ===========================================================================
22# BUILD INSTRUCTIONS
23# ===========================================================================
24#
25# For aarch64 Dom0:
26# MACHINE=qemuarm64 bitbake vxn
27#
28# For x86_64 Dom0:
29# MACHINE=qemux86-64 bitbake vxn
30#
31# Add to a Dom0 image:
32# IMAGE_INSTALL:append = " vxn"
33#
34# Usage on Dom0:
35# vxn run hello-world # Run OCI container as Xen DomU
36# vxn vmemres start # Start persistent DomU (daemon mode)
37# vxn vexpose # Expose Docker API on Dom0
38#
39# ===========================================================================
40
# ---------------------------------------------------------------------------
# Package metadata
# ---------------------------------------------------------------------------
SUMMARY = "Docker CLI for Xen-based container execution"
DESCRIPTION = "vxn provides a familiar docker-like CLI that executes commands \
               inside a Xen DomU guest with Docker. It uses the vcontainer \
               infrastructure with a Xen hypervisor backend."
HOMEPAGE = "https://git.yoctoproject.org/meta-virtualization/"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"

# vxn targets a Xen Dom0 only: skip this recipe entirely unless the distro
# enables the "xen" feature.
inherit features_check
REQUIRED_DISTRO_FEATURES = "xen"

# Host-side (Dom0) scripts shipped by this package; there is no upstream
# tarball, everything is fetched from local files.
SRC_URI = "\
    file://vxn.sh \
    file://vrunner.sh \
    file://vrunner-backend-xen.sh \
    file://vrunner-backend-qemu.sh \
    file://vcontainer-common.sh \
"

# The scripts live in the vcontainer recipe's files/ directory; extend the
# file search path so this recipe can reuse them without copies.
FILESEXTRAPATHS:prepend := "${THISDIR}/../../recipes-containers/vcontainer/files:"

S = "${UNPACKDIR}"

# Runtime dependencies on Dom0
RDEPENDS:${PN} = "\
    xen-tools-xl \
    bash \
    jq \
    socat \
    coreutils \
    util-linux \
    e2fsprogs \
    skopeo \
"

# Blobs are sourced from vxn-initramfs-create deploy output.
# Build blobs first: bitbake vxn-initramfs-create
# No task dependency here - vxn-initramfs-create is deploy-only (no packages).
# Adding any dependency from a packaged recipe to a deploy-only recipe
# breaks do_rootfs (sstate manifest not found for package_write_rpm).

# Blobs come from DEPLOY_DIR which is untracked by sstate hash.
# nostamp on do_install alone is insufficient — do_package and
# do_package_write_rpm have unchanged sstate hashes so they restore
# the OLD RPM from cache, discarding the fresh do_install output.
# Force the entire install→package→RPM chain to always re-run.
do_install[nostamp] = "1"
do_package[nostamp] = "1"
do_packagedata[nostamp] = "1"
do_package_write_rpm[nostamp] = "1"
do_package_write_ipk[nostamp] = "1"
do_package_write_deb[nostamp] = "1"
def vxn_get_blob_arch(d):
    """Return the blob sub-directory name for the current TARGET_ARCH.

    All x86-family targets share the x86_64 blob set; every other
    architecture — including aarch64 itself and anything unrecognized —
    falls back to the aarch64 blob set.
    """
    target_arch = d.getVar('TARGET_ARCH')
    if target_arch in ('x86_64', 'i686', 'i586'):
        return 'x86_64'
    # aarch64, arm, and unknown arches all use the aarch64 blobs.
    return 'aarch64'
101
def vxn_get_kernel_image_name(d):
    """Return the kernel image filename conventionally used by TARGET_ARCH.

    x86 family uses bzImage, 32-bit arm uses zImage; aarch64 and any
    unrecognized architecture default to the plain 'Image' name.
    """
    image_by_arch = {
        'x86_64': 'bzImage',
        'i686': 'bzImage',
        'i586': 'bzImage',
        'arm': 'zImage',
    }
    return image_by_arch.get(d.getVar('TARGET_ARCH'), 'Image')
111
# Architecture-specific names resolved once at parse time via the Python
# helpers above: blob sub-directory and the kernel image filename to look for.
BLOB_ARCH = "${@vxn_get_blob_arch(d)}"
KERNEL_IMAGETYPE_VXN = "${@vxn_get_kernel_image_name(d)}"

# Location of the vxn-initramfs-create deploy output the blobs are copied from.
VXN_DEPLOY = "${DEPLOY_DIR_IMAGE}"
116
# Install the Dom0 CLI, the shared runner scripts, and — when already built —
# the DomU boot blobs from the vxn-initramfs-create deploy directory.
# Blob installation is deliberately best-effort: each missing blob only
# emits bbwarn so this package still builds before the blobs exist.
do_install() {
    # Install CLI wrapper
    install -d ${D}${bindir}
    install -m 0755 ${S}/vxn.sh ${D}${bindir}/vxn

    # Install shared scripts into libdir
    install -d ${D}${libdir}/vxn
    install -m 0755 ${S}/vrunner.sh ${D}${libdir}/vxn/
    install -m 0755 ${S}/vrunner-backend-xen.sh ${D}${libdir}/vxn/
    install -m 0755 ${S}/vrunner-backend-qemu.sh ${D}${libdir}/vxn/
    # vcontainer-common.sh is sourced, not executed, hence 0644.
    install -m 0644 ${S}/vcontainer-common.sh ${D}${libdir}/vxn/

    # Install blobs from vxn-initramfs-create deployment
    # Layout must match what vrunner backends expect: $BLOB_DIR/<arch>/{Image,initramfs.cpio.gz,rootfs.img}
    install -d ${D}${datadir}/vxn/${BLOB_ARCH}

    VXN_BLOB_SRC="${VXN_DEPLOY}/vxn/${BLOB_ARCH}"
    if [ -d "${VXN_BLOB_SRC}" ]; then
        # Kernel image name is arch-dependent (Image/bzImage/zImage).
        if [ -f "${VXN_BLOB_SRC}/${KERNEL_IMAGETYPE_VXN}" ]; then
            install -m 0644 "${VXN_BLOB_SRC}/${KERNEL_IMAGETYPE_VXN}" ${D}${datadir}/vxn/${BLOB_ARCH}/
            bbnote "Installed kernel ${KERNEL_IMAGETYPE_VXN}"
        else
            bbwarn "Kernel not found at ${VXN_BLOB_SRC}/${KERNEL_IMAGETYPE_VXN}"
        fi

        if [ -f "${VXN_BLOB_SRC}/initramfs.cpio.gz" ]; then
            install -m 0644 "${VXN_BLOB_SRC}/initramfs.cpio.gz" ${D}${datadir}/vxn/${BLOB_ARCH}/
            bbnote "Installed initramfs"
        else
            bbwarn "Initramfs not found at ${VXN_BLOB_SRC}/initramfs.cpio.gz"
        fi

        if [ -f "${VXN_BLOB_SRC}/rootfs.img" ]; then
            install -m 0644 "${VXN_BLOB_SRC}/rootfs.img" ${D}${datadir}/vxn/${BLOB_ARCH}/
            bbnote "Installed rootfs.img"
        else
            bbwarn "Rootfs not found at ${VXN_BLOB_SRC}/rootfs.img"
        fi
    else
        # No blob dir at all: point the user at the producing recipe.
        bbwarn "VXN blob directory not found at ${VXN_BLOB_SRC}. Build with: bitbake vxn-initramfs-create"
    fi
}
159
# Package contents: the CLI, the runner scripts, and the (optional) blob tree.
FILES:${PN} = "\
    ${bindir}/vxn \
    ${libdir}/vxn/ \
    ${datadir}/vxn/ \
"

# Blobs are large binary files
INSANE_SKIP:${PN} += "already-stripped"