author      Robert Yang <liezhi.yang@windriver.com>              2016-08-25 07:41:05 -0700
committer   Richard Purdie <richard.purdie@linuxfoundation.org>  2016-09-09 12:07:32 +0100
commit      b405712414de98cba3a645601419aceb32d52f02 (patch)
tree        3fb4412aa8ac988de1f9154e5f9968fa77b5abeb /scripts/runqemu
parent      638d19adb4eefc7e801f9409d0b348a5a39cc25a (diff)
download    poky-b405712414de98cba3a645601419aceb32d52f02.tar.gz
runqemu: refactor it and remove machine knowledge
Previously, runqemu had hard-coded machine knowledge, which limited its
usage. For example, qemu can boot genericx86, but runqemu couldn't; we
would need to edit runqemu/runqemu-internal extensively to boot
genericx86.
Now BSP conf files can set vars to make their machines bootable by
runqemu, and qemuboot.bbclass saves this info to
DEPLOY_DIR_IMAGE/qemuboot.conf. Please see qemuboot.bbclass' comments
on how to set the vars.
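As a rough sketch (not the literal output of qemuboot.bbclass; only the
variable names and defaults are taken from this patch, the values are
placeholders), such a conf file carries a [config_bsp] section that
runqemu parses, e.g.:
  [config_bsp]
  QB_SYSTEM_NAME = qemu-system-x86_64
  QB_DEFAULT_KERNEL = bzImage-qemux86-64.bin
  QB_DEFAULT_FSTYPE = ext4
  QB_MEM = -m 512
  QB_KERNEL_ROOT = /dev/vda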
* Re-write it in python3, which reduces the line count from 1239 to
  about 750
* All of the machine knowledge is gone
* All of the TUN_ARCH knowledge is gone
* All the previous options are preserved, and there is a new way to run
  runqemu (it doesn't need to run "bitbake -e" in this case):
$ runqemu tmp/deploy/images/qemux86
or:
$ runqemu tmp/deploy/images/qemuarm/<image>.ext4
or:
$ runqemu tmp/deploy/images/qemuarm/qemuboot.conf
* Fixed audio support, which is no longer limited to x86 or x86_64
* Fixed SLIRP mode, added a help message, and avoided mixing it with tap
* Fixed NFS boot: when NFS_DIR is not set, it extracts <image>.tar.bz2
  or <image>.tar.gz to DEPLOY_DIR_IMAGE/<image>-nfsroot and removes that
  directory after QEMU stops (see the example after this list).
* More BSPs can be booted, such as genericx86 and genericx86-64.
* The patch for qemuzynq, qemuzynqmp and qemumicroblaze has been sent
  to the meta-xilinx mailing list.
* I can't find any qemush4 BSP or how to build it, so it is not
  considered at the moment.
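For example (a sketch, assuming the build environment is sourced and a
rootfs tarball exists in DEPLOY_DIR_IMAGE; the machine name is a
placeholder), an NFS boot without NFS_DIR set could be started with:
  $ runqemu qemux86-64 nfs
runqemu then extracts the rootfs tarball to <image>-nfsroot, starts the
userspace NFS server via runqemu-export-rootfs, and removes the
extracted directory again on exit.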
[YOCTO #1018]
[YOCTO #4827]
[YOCTO #7459]
[YOCTO #7887]
(From OE-Core rev: 60ca8a8d899b90a4693fd62b6ec97d0c76a9f6c5)
Signed-off-by: Robert Yang <liezhi.yang@windriver.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'scripts/runqemu')
-rwxr-xr-x  scripts/runqemu  1332
1 file changed, 807 insertions, 525 deletions
diff --git a/scripts/runqemu b/scripts/runqemu
index d52ea15936..7b0bcb24ee 100755
--- a/scripts/runqemu
+++ b/scripts/runqemu
@@ -1,8 +1,9 @@
1 | #!/bin/bash | 1 | #!/usr/bin/env python3 |
2 | # | 2 | |
3 | # Handle running OE images standalone with QEMU | 3 | # Handle running OE images standalone with QEMU |
4 | # | 4 | # |
5 | # Copyright (C) 2006-2011 Linux Foundation | 5 | # Copyright (C) 2006-2011 Linux Foundation |
6 | # Copyright (c) 2016 Wind River Systems, Inc. | ||
6 | # | 7 | # |
7 | # This program is free software; you can redistribute it and/or modify | 8 | # This program is free software; you can redistribute it and/or modify |
8 | # it under the terms of the GNU General Public License version 2 as | 9 | # it under the terms of the GNU General Public License version 2 as |
@@ -17,536 +18,817 @@
17 | # with this program; if not, write to the Free Software Foundation, Inc., | 18 | # with this program; if not, write to the Free Software Foundation, Inc., |
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | 19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
19 | 20 | ||
20 | usage() { | 21 | import os |
21 | MYNAME=`basename $0` | 22 | import sys |
22 | cat <<_EOF | 23 | import logging |
24 | import subprocess | ||
25 | import re | ||
26 | import fcntl | ||
27 | import shutil | ||
28 | import glob | ||
29 | import configparser | ||
30 | |||
31 | def create_logger(): | ||
32 | logger = logging.getLogger('runqemu') | ||
33 | logger.setLevel(logging.INFO) | ||
34 | |||
35 | # create console handler and set level to debug | ||
36 | ch = logging.StreamHandler() | ||
37 | ch.setLevel(logging.INFO) | ||
38 | |||
39 | # create formatter | ||
40 | formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') | ||
41 | |||
42 | # add formatter to ch | ||
43 | ch.setFormatter(formatter) | ||
44 | |||
45 | # add ch to logger | ||
46 | logger.addHandler(ch) | ||
47 | |||
48 | return logger | ||
49 | |||
50 | logger = create_logger() | ||
23 | 51 | ||
52 | def print_usage(): | ||
53 | print(""" | ||
24 | Usage: you can run this script with any valid combination | 54 | Usage: you can run this script with any valid combination |
25 | of the following environment variables (in any order): | 55 | of the following environment variables (in any order): |
26 | KERNEL - the kernel image file to use | 56 | KERNEL - the kernel image file to use |
27 | ROOTFS - the rootfs image file or nfsroot directory to use | 57 | ROOTFS - the rootfs image file or nfsroot directory to use |
28 | MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified) | 58 | MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified) |
29 | Simplified QEMU command-line options can be passed with: | 59 | Simplified QEMU command-line options can be passed with: |
30 | nographic - disables video console | 60 | nographic - disable video console |
31 | serial - enables a serial console on /dev/ttyS0 | 61 | serial - enable a serial console on /dev/ttyS0 |
32 | kvm - enables KVM when running qemux86/qemux86-64 (VT-capable CPU required) | 62 | slirp - enable user networking, no root privileges is required |
33 | kvm-vhost - enables KVM with vhost support when running qemux86/qemux86-64 (VT-capable CPU required) | 63 | kvm - enable KVM when running x86/x86_64 (VT-capable CPU required) |
64 | kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required) | ||
34 | publicvnc - enable a VNC server open to all hosts | 65 | publicvnc - enable a VNC server open to all hosts |
35 | qemuparams="xyz" - specify custom parameters to QEMU | 66 | audio - enable audio |
36 | bootparams="xyz" - specify custom kernel parameters during boot | 67 | tcpserial=<port> - specify tcp serial port number |
68 | biosdir=<dir> - specify custom bios dir | ||
69 | biosfilename=<filename> - specify bios filename | ||
70 | qemuparams=<xyz> - specify custom parameters to QEMU | ||
71 | bootparams=<xyz> - specify custom kernel parameters during boot | ||
72 | help: print this text | ||
37 | 73 | ||
38 | Examples: | 74 | Examples: |
39 | $MYNAME qemuarm | 75 | runqemu qemuarm |
40 | $MYNAME qemux86-64 core-image-sato ext4 | 76 | runqemu tmp/deploy/images/qemuarm |
41 | $MYNAME qemux86-64 wic-image-minimal wic | 77 | runqemu tmp/deploy/images/qemux86/.qemuboot.conf |
42 | $MYNAME path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial | 78 | runqemu qemux86-64 core-image-sato ext4 |
43 | $MYNAME qemux86 iso/hddimg/vmdk/qcow2/vdi/ramfs/cpio.gz... | 79 | runqemu qemux86-64 wic-image-minimal wic |
44 | $MYNAME qemux86 qemuparams="-m 256" | 80 | runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial |
45 | $MYNAME qemux86 bootparams="psplash=false" | 81 | runqemu qemux86 iso/hddimg/vmdk/qcow2/vdi/ramfs/cpio.gz... |
46 | $MYNAME path/to/<image>-<machine>.vmdk | 82 | runqemu qemux86 qemuparams="-m 256" |
47 | $MYNAME path/to/<image>-<machine>.wic | 83 | runqemu qemux86 bootparams="psplash=false" |
48 | _EOF | 84 | runqemu path/to/<image>-<machine>.vmdk |
49 | exit 1 | 85 | runqemu path/to/<image>-<machine>.wic |
50 | } | 86 | """) |
51 | 87 | ||
52 | if [ "x$1" = "x" ]; then | 88 | def check_tun(): |
53 | usage | 89 | """Check /dev/net/run""" |
54 | fi | 90 | dev_tun = '/dev/net/tun' |
55 | 91 | if not os.path.exists(dev_tun): | |
56 | error() { | 92 | raise Exception("TUN control device %s is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)" % dev_tun) |
57 | echo "Error: "$* | 93 | |
58 | usage | 94 | if not os.access(dev_tun, os.W_OK): |
59 | } | 95 | raise Exception("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun)) |
60 | 96 | ||
61 | MACHINE=${MACHINE:=""} | 97 | def check_libgl(qemu_bin): |
62 | KERNEL=${KERNEL:=""} | 98 | cmd = 'ldd %s' % qemu_bin |
63 | ROOTFS=${ROOTFS:=""} | 99 | logger.info('Running %s...' % cmd) |
64 | FSTYPE=${FSTYPE:=""} | 100 | need_gl = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8') |
65 | LAZY_ROOTFS="" | 101 | if re.search('libGLU', need_gl): |
66 | SCRIPT_QEMU_OPT="" | 102 | # We can't run without a libGL.so |
67 | SCRIPT_QEMU_EXTRA_OPT="" | 103 | libgl = False |
68 | SCRIPT_KERNEL_OPT="" | 104 | check_files = (('/usr/lib/libGL.so', '/usr/lib/libGLU.so'), \ |
69 | SERIALSTDIO="" | 105 | ('/usr/lib64/libGL.so', '/usr/lib64/libGLU.so'), \ |
70 | TCPSERIAL_PORTNUM="" | 106 | ('/usr/lib/*-linux-gnu/libGL.so', '/usr/lib/*-linux-gnu/libGLU.so')) |
71 | KVM_ENABLED="no" | 107 | |
72 | KVM_ACTIVE="no" | 108 | for (f1, f2) in check_files: |
73 | VHOST_ENABLED="no" | 109 | if re.search('\*', f1): |
74 | VHOST_ACTIVE="no" | 110 | for g1 in glob.glob(f1): |
75 | IS_VM="false" | 111 | if libgl: |
76 | 112 | break | |
77 | # Determine whether the file is a kernel or QEMU image, and set the | 113 | if os.path.exists(g1): |
78 | # appropriate variables | 114 | for g2 in glob.glob(f2): |
79 | process_filename() { | 115 | if os.path.exists(g2): |
80 | filename=$1 | 116 | libgl = True |
81 | 117 | break | |
82 | # Extract the filename extension | 118 | if libgl: |
83 | EXT=`echo $filename | awk -F . '{ print \$NF }'` | 119 | break |
84 | case /$EXT/ in | 120 | else: |
85 | /bin/) | 121 | if os.path.exists(f1) and os.path.exists(f2): |
86 | # A file ending in .bin is a kernel | 122 | libgl = True |
87 | [ -z "$KERNEL" ] && KERNEL=$filename || \ | 123 | break |
88 | error "conflicting KERNEL args [$KERNEL] and [$filename]" | 124 | if not libgl: |
89 | ;; | 125 | logger.error("You need libGL.so and libGLU.so to exist in your library path to run the QEMU emulator.") |
90 | /ext[234]/|/jffs2/|/btrfs/) | 126 | logger.error("Ubuntu package names are: libgl1-mesa-dev and libglu1-mesa-dev.") |
91 | # A file ending in a supportted fs type is a rootfs image | 127 | logger.error("Fedora package names are: mesa-libGL-devel mesa-libGLU-devel.") |
92 | if [ -z "$FSTYPE" -o "$FSTYPE" = "$EXT" ]; then | 128 | raise Exception('%s requires libGLU, but not found' % qemu_bin) |
93 | FSTYPE=$EXT | 129 | |
94 | ROOTFS=$filename | 130 | class BaseConfig(object): |
95 | else | 131 | def __init__(self): |
96 | error "conflicting FSTYPE types [$FSTYPE] and [$EXT]" | 132 | # Vars can be merged with .qemuboot.conf, use a dict to manage them. |
97 | fi | 133 | self.d = { |
98 | ;; | 134 | 'MACHINE': '', |
99 | /hddimg/|/hdddirect/|/vmdk/|/wic/|/qcow2/|/vdi/) | 135 | 'DEPLOY_DIR_IMAGE': '', |
100 | FSTYPE=$EXT | 136 | 'QB_KERNEL_ROOT': '/dev/vda', |
101 | VM=$filename | 137 | } |
102 | ROOTFS=$filename | 138 | |
103 | IS_VM="true" | 139 | self.qemu_opt = '' |
104 | ;; | 140 | self.qemu_opt_script = '' |
105 | *) | 141 | self.nfs_dir = '' |
106 | error "unknown file arg [$filename]" | 142 | self.clean_nfs_dir = False |
107 | ;; | 143 | self.nfs_server = '' |
108 | esac | 144 | self.rootfs = '' |
109 | } | 145 | self.qemuboot = '' |
110 | 146 | self.kernel = '' | |
111 | check_fstype_conflicts() { | 147 | self.kernel_cmdline = '' |
112 | if [ -z "$FSTYPE" -o "$FSTYPE" = "$1" ]; then | 148 | self.kernel_cmdline_script = '' |
113 | FSTYPE=$1 | 149 | self.fstype = '' |
114 | else | 150 | self.kvm_enabled = False |
115 | error "conflicting FSTYPE types [$FSTYPE] and [$1]" | 151 | self.vhost_enabled = False |
116 | fi | 152 | self.slirp_enabled = False |
117 | } | 153 | self.nfs_instance = 0 |
118 | # Parse command line args without requiring specific ordering. It's a | 154 | self.nfs_running = False |
119 | # bit more complex, but offers a great user experience. | 155 | self.serialstdio = False |
120 | while true; do | 156 | self.cleantap = False |
121 | arg=${1} | 157 | self.saved_stty = '' |
122 | case "$arg" in | 158 | self.audio_enabled = False |
123 | "qemux86" | "qemux86-64" | "qemuarm" | "qemuarm64" | "qemumips" | "qemumipsel" | \ | 159 | self.tcpserial_portnum = '' |
124 | "qemumips64" | "qemush4" | "qemuppc" | "qemumicroblaze" | "qemuzynq" | "qemuzynqmp") | 160 | self.custombiosdir = '' |
125 | [ -z "$MACHINE" -o "$MACHINE" = "$arg" ] && MACHINE=$arg || \ | 161 | self.lock = '' |
126 | error "conflicting MACHINE types [$MACHINE] and [$arg]" | 162 | self.lock_descriptor = '' |
127 | ;; | 163 | self.bitbake_e = '' |
128 | "ext"[234] | "jffs2" | "nfs" | "btrfs") | 164 | self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs', 'cpio.gz', 'cpio', 'ramfs') |
129 | check_fstype_conflicts $arg | 165 | self.vmtypes = ('hddimg', 'hdddirect', 'wic', 'vmdk', 'qcow2', 'vdi', 'iso') |
130 | ;; | 166 | |
131 | "hddimg" | "hdddirect" | "wic" | "vmdk" | "qcow2" | "vdi" | "iso") | 167 | def acquire_lock(self): |
132 | check_fstype_conflicts $arg | 168 | logger.info("Acquiring lockfile %s..." % self.lock) |
133 | IS_VM="true" | 169 | lock_descriptor = open(self.lock, 'w') |
134 | ;; | 170 | try: |
135 | "ramfs" | "cpio.gz") | 171 | fcntl.flock(lock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB) |
136 | FSTYPE=cpio.gz | 172 | except Exception as e: |
137 | ;; | 173 | logger.info("Acquiring lockfile %s failed: %s" % (self.lock, e)) |
138 | "nographic") | 174 | lock_descriptor.close() |
139 | SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -nographic" | 175 | return False |
140 | SCRIPT_KERNEL_OPT="$SCRIPT_KERNEL_OPT console=ttyS0" | 176 | self.lock_descriptor = lock_descriptor |
141 | ;; | 177 | return True |
142 | "serial") | 178 | |
143 | SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -serial stdio" | 179 | def release_lock(self): |
144 | SCRIPT_KERNEL_OPT="$SCRIPT_KERNEL_OPT console=ttyS0" | 180 | fcntl.flock(self.lock_descriptor, fcntl.LOCK_UN) |
145 | SERIALSTDIO="1" | 181 | self.lock_descriptor.close() |
146 | ;; | 182 | os.remove(self.lock) |
147 | "tcpserial="*) | 183 | |
148 | TCPSERIAL_PORTNUM=${arg##tcpserial=} | 184 | def get(self, key): |
149 | ;; | 185 | if key in self.d: |
150 | "biosdir="*) | 186 | return self.d.get(key) |
151 | CUSTOMBIOSDIR="${arg##biosdir=}" | 187 | else: |
152 | ;; | 188 | return '' |
153 | "biosfilename="*) | 189 | |
154 | SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -bios ${arg##biosfilename=}" | 190 | def set(self, key, value): |
155 | ;; | 191 | self.d[key] = value |
156 | "qemuparams="*) | 192 | |
157 | SCRIPT_QEMU_EXTRA_OPT="${arg##qemuparams=}" | 193 | def is_deploy_dir_image(self, p): |
158 | 194 | if os.path.isdir(p): | |
159 | # Warn user if they try to specify serial or kvm options | 195 | if not re.search('.qemuboot.conf$', '\n'.join(os.listdir(p)), re.M): |
160 | # to use simplified options instead | 196 | logger.info("Can't find required qemuboot.conf in %s" % p) |
161 | serial_option=`expr "$SCRIPT_QEMU_EXTRA_OPT" : '.*\(-serial\)'` | 197 | return False |
162 | kvm_option=`expr "$SCRIPT_QEMU_EXTRA_OPT" : '.*\(-enable-kvm\)'` | 198 | if not re.search('-image-', '\n'.join(os.listdir(p))): |
163 | vga_option=`expr "$SCRIPT_QEMU_EXTRA_OPT" : '.*\(-vga\)'` | 199 | logger.info("Can't find *-image-* in %s" % p) |
164 | [ ! -z "$serial_option" -o ! -z "$kvm_option" ] && \ | 200 | return False |
165 | echo "Please use simplified serial or kvm options instead" | 201 | return True |
166 | ;; | 202 | else: |
167 | "bootparams="*) | 203 | return False |
168 | SCRIPT_KERNEL_OPT="$SCRIPT_KERNEL_OPT ${arg##bootparams=}" | 204 | |
169 | ;; | 205 | def check_arg_fstype(self, fst): |
170 | "audio") | 206 | """Check and set FSTYPE""" |
171 | if [ "x$MACHINE" = "xqemux86" -o "x$MACHINE" = "xqemux86-64" ]; then | 207 | if fst not in self.fstypes + self.vmtypes: |
172 | echo "Enabling audio in qemu." | 208 | logger.warn("Maybe unsupported FSTYPE: %s" % fst) |
173 | echo "Please install snd_intel8x0 or snd_ens1370 driver in linux guest." | 209 | if not self.fstype or self.fstype == fst: |
174 | QEMU_AUDIO_DRV="alsa" | 210 | if fst == 'ramfs': |
175 | SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -soundhw ac97,es1370" | 211 | fst = 'cpio.gz' |
176 | fi | 212 | self.fstype = fst |
177 | ;; | 213 | else: |
178 | "kvm") | 214 | raise Exception("Conflicting: FSTYPE %s and %s" % (self.fstype, fst)) |
179 | KVM_ENABLED="yes" | 215 | |
180 | KVM_CAPABLE=`grep -q 'vmx\|svm' /proc/cpuinfo && echo 1` | 216 | def set_machine_deploy_dir(self, machine, deploy_dir_image): |
181 | ;; | 217 | """Set MACHINE and DEPLOY_DIR_IMAGE""" |
182 | "kvm-vhost") | 218 | logger.info('MACHINE: %s' % machine) |
183 | KVM_ENABLED="yes" | 219 | self.set("MACHINE", machine) |
184 | KVM_CAPABLE=`grep -q 'vmx\|svm' /proc/cpuinfo && echo 1` | 220 | logger.info('DEPLOY_DIR_IMAGE: %s' % deploy_dir_image) |
185 | VHOST_ENABLED="yes" | 221 | self.set("DEPLOY_DIR_IMAGE", deploy_dir_image) |
186 | ;; | 222 | |
187 | "slirp") | 223 | def check_arg_nfs(self, p): |
188 | SLIRP_ENABLED="yes" | 224 | if os.path.isdir(p): |
189 | ;; | 225 | self.nfs_dir = p |
190 | "publicvnc") | 226 | else: |
191 | SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -vnc :0" | 227 | m = re.match('(.*):(.*)', p) |
192 | ;; | 228 | self.nfs_server = m.group(1) |
193 | *-image*) | 229 | self.nfs_dir = m.group(2) |
194 | [ -z "$ROOTFS" ] || \ | 230 | self.rootfs = "" |
195 | error "conflicting ROOTFS args [$ROOTFS] and [$arg]" | 231 | self.check_arg_fstype('nfs') |
196 | if [ -f "$arg" ]; then | 232 | |
197 | process_filename $arg | 233 | def check_arg_path(self, p): |
198 | elif [ -d "$arg" ]; then | 234 | """ |
199 | # Handle the case where the nfsroot dir has -image- | 235 | - Check whether it is <image>.qemuboot.conf or contains <image>.qemuboot.conf |
200 | # in the pathname | 236 | - Check whether is a kernel file |
201 | echo "Assuming $arg is an nfs rootfs" | 237 | - Check whether is a image file |
202 | FSTYPE=nfs | 238 | - Check whether it is a nfs dir |
203 | ROOTFS=$arg | 239 | """ |
204 | else | 240 | if p.endswith('.qemuboot.conf'): |
205 | ROOTFS=$arg | 241 | self.qemuboot = p |
206 | LAZY_ROOTFS="true" | 242 | elif re.search('\.bin$', p) or re.search('bzImage', p) or \ |
207 | fi | 243 | re.search('zImage', p) or re.search('vmlinux', p) or \ |
208 | ;; | 244 | re.search('fitImage', p) or re.search('uImage', p): |
209 | "") break ;; | 245 | self.kernel = p |
210 | *) | 246 | elif os.path.exists(p) and (not os.path.isdir(p)) and re.search('-image-', os.path.basename(p)): |
211 | # A directory name is an nfs rootfs | 247 | self.rootfs = p |
212 | if [ -d "$arg" ]; then | 248 | dirpath = os.path.dirname(p) |
213 | echo "Assuming $arg is an nfs rootfs" | 249 | m = re.search('(.*)\.(.*)$', p) |
214 | if [ -z "$FSTYPE" -o "$FSTYPE" = "nfs" ]; then | 250 | if m: |
215 | FSTYPE=nfs | 251 | qb = '%s%s' % (re.sub('\.rootfs$', '', m.group(1)), '.qemuboot.conf') |
216 | else | 252 | if os.path.exists(qb): |
217 | error "conflicting FSTYPE types [$arg] and nfs" | 253 | self.qemuboot = qb |
218 | fi | 254 | else: |
219 | 255 | logger.warn("%s doesn't exist" % qb) | |
220 | if [ -z "$ROOTFS" ]; then | 256 | fst = m.group(2) |
221 | ROOTFS=$arg | 257 | self.check_arg_fstype(fst) |
222 | else | 258 | else: |
223 | error "conflicting ROOTFS args [$ROOTFS] and [$arg]" | 259 | raise Exception("Can't find FSTYPE from: %s" % p) |
224 | fi | 260 | elif os.path.isdir(p) or re.search(':', arg) and re.search('/', arg): |
225 | elif [ -f "$arg" ]; then | 261 | if self.is_deploy_dir_image(p): |
226 | process_filename $arg | 262 | logger.info('DEPLOY_DIR_IMAGE: %s' % p) |
227 | else | 263 | self.set("DEPLOY_DIR_IMAGE", p) |
228 | error "unable to classify arg [$arg]" | 264 | else: |
229 | fi | 265 | logger.info("Assuming %s is an nfs rootfs" % p) |
230 | ;; | 266 | self.check_arg_nfs(p) |
231 | esac | 267 | else: |
232 | shift | 268 | raise Exception("Unknown path arg %s" % p) |
233 | done | 269 | |
234 | 270 | def check_arg_machine(self, arg): | |
235 | if [ ! -c /dev/net/tun ] ; then | 271 | """Check whether it is a machine""" |
236 | echo "TUN control device /dev/net/tun is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)" | 272 | if self.get('MACHINE') and self.get('MACHINE') != arg or re.search('/', arg): |
237 | exit 1 | 273 | raise Exception("Unknown arg: %s" % arg) |
238 | elif [ ! -w /dev/net/tun ] ; then | 274 | elif self.get('MACHINE') == arg: |
239 | echo "TUN control device /dev/net/tun is not writable, please fix (e.g. sudo chmod 666 /dev/net/tun)" | 275 | return |
240 | exit 1 | 276 | logger.info('Assuming MACHINE = %s' % arg) |
241 | fi | 277 | cmd = 'MACHINE=%s bitbake -e' % arg |
242 | 278 | logger.info('Running %s...' % cmd) | |
243 | # Report errors for missing combinations of options | 279 | self.bitbake_e = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8') |
244 | if [ -z "$MACHINE" -a -z "$KERNEL" -a -z "$VM" -a "$FSTYPE" != "wic" ]; then | 280 | # bitbake -e doesn't report invalid MACHINE as an error, so |
245 | error "you must specify at least a MACHINE or KERNEL argument" | 281 | # let's check DEPLOY_DIR_IMAGE to make sure that it is a valid |
246 | fi | 282 | # MACHINE. |
247 | if [ "$FSTYPE" = "nfs" -a -z "$ROOTFS" ]; then | 283 | s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M) |
248 | error "NFS booting without an explicit ROOTFS path is not yet supported" | 284 | if s: |
249 | fi | 285 | deploy_dir_image = s.group(1) |
250 | 286 | else: | |
251 | if [ -z "$MACHINE" ]; then | 287 | raise Exception("bitbake -e %s" % self.bitbake_e) |
252 | if [ "$IS_VM" = "true" ]; then | 288 | if self.is_deploy_dir_image(deploy_dir_image): |
253 | [ "x$FSTYPE" = "xwic" ] && filename=$ROOTFS || filename=$VM | 289 | self.set_machine_deploy_dir(arg, deploy_dir_image) |
254 | MACHINE=`basename $filename | sed -n 's/.*\(qemux86-64\|qemux86\|qemuarm64\|qemuarm\|qemumips64\|qemumips\|qemuppc\|qemush4\).*/\1/p'` | 290 | else: |
255 | if [ -z "$MACHINE" ]; then | 291 | logger.error("%s not a directory valid DEPLOY_DIR_IMAGE" % deploy_dir_image) |
256 | error "Unable to set MACHINE from image filename [$VM]" | 292 | raise Exception("Failed to set MACHINE to %s. Unknown arg: %s" % (arg, arg)) |
257 | fi | 293 | |
258 | echo "Set MACHINE to [$MACHINE] based on image [$VM]" | 294 | def check_args(self): |
259 | else | 295 | unknown_arg = "" |
260 | MACHINE=`basename $KERNEL | sed -n 's/.*\(qemux86-64\|qemux86\|qemuarm64\|qemuarm\|qemumips64\|qemumips\|qemuppc\|qemush4\).*/\1/p'` | 296 | for arg in sys.argv[1:]: |
261 | if [ -z "$MACHINE" ]; then | 297 | if arg in self.fstypes + self.vmtypes: |
262 | error "Unable to set MACHINE from kernel filename [$KERNEL]" | 298 | self.check_arg_fstype(arg) |
263 | fi | 299 | elif arg == 'nographic': |
264 | echo "Set MACHINE to [$MACHINE] based on kernel [$KERNEL]" | 300 | self.qemu_opt_script += ' -nographic' |
265 | fi | 301 | self.kernel_cmdline_script += ' console=ttyS0' |
266 | fi | 302 | elif arg == 'serial': |
267 | 303 | self.kernel_cmdline_script += ' console=ttyS0' | |
268 | YOCTO_KVM_WIKI="https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu" | 304 | self.serialstdio = True |
269 | YOCTO_PARAVIRT_KVM_WIKI="https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM" | 305 | elif arg == 'audio': |
270 | # Detect KVM configuration | 306 | logger.info("Enabling audio in qemu") |
271 | if [ "x$KVM_ENABLED" = "xyes" ]; then | 307 | logger.info("Please install sound drivers in linux host") |
272 | if [ -z "$KVM_CAPABLE" ]; then | 308 | self.audio_enabled = True |
273 | echo "You are trying to enable KVM on a cpu without VT support." | 309 | elif arg == 'kvm': |
274 | echo "Remove kvm from the command-line, or refer" | 310 | self.kvm_enabled = True |
275 | echo "$YOCTO_KVM_WIKI"; | 311 | elif arg == 'kvm-vhost': |
276 | exit 1; | 312 | self.vhost_enabled = True |
277 | fi | 313 | elif arg == 'slirp': |
278 | if [ "x$MACHINE" != "xqemux86" -a "x$MACHINE" != "xqemux86-64" ]; then | 314 | self.slirp_enabled = True |
279 | echo "KVM only support x86 & x86-64. Remove kvm from the command-line"; | 315 | elif arg == 'publicvnc': |
280 | exit 1; | 316 | self.qemu_opt_script += ' -vnc :0' |
281 | fi | 317 | elif arg.startswith('tcpserial='): |
282 | if [ ! -e /dev/kvm ]; then | 318 | self.tcpserial_portnum = arg[len('tcpserial='):] |
283 | echo "Missing KVM device. Have you inserted kvm modules?" | 319 | elif arg.startswith('biosdir='): |
284 | echo "For further help see:" | 320 | self.custombiosdir = arg[len('biosdir='):] |
285 | echo "$YOCTO_KVM_WIKI"; | 321 | elif arg.startswith('biosfilename='): |
286 | exit 1; | 322 | self.qemu_opt_script += ' -bios %s' % arg[len('biosfilename='):] |
287 | fi | 323 | elif arg.startswith('qemuparams='): |
288 | if [ -w /dev/kvm -a -r /dev/kvm ]; then | 324 | self.qemu_opt_script += ' %s' % arg[len('qemuparams='):] |
289 | SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -enable-kvm" | 325 | elif arg.startswith('bootparams='): |
290 | KVM_ACTIVE="yes" | 326 | self.kernel_cmdline_script += ' %s' % arg[len('bootparams='):] |
291 | else | 327 | elif os.path.exists(arg) or (re.search(':', arg) and re.search('/', arg)): |
292 | echo "You have no rights on /dev/kvm." | 328 | self.check_arg_path(os.path.abspath(arg)) |
293 | echo "Please change the ownership of this file as described at:" | 329 | elif re.search('-image-', arg): |
294 | echo "$YOCTO_KVM_WIKI"; | 330 | # Lazy rootfs |
295 | exit 1; | 331 | self.rootfs = arg |
296 | fi | 332 | else: |
297 | if [ "x$VHOST_ENABLED" = "xyes" ]; then | 333 | # At last, assume is it the MACHINE |
298 | if [ ! -e /dev/vhost-net ]; then | 334 | if (not unknown_arg) or unknown_arg == arg: |
299 | echo "Missing virtio net device. Have you inserted vhost-net module?" | 335 | unknown_arg = arg |
300 | echo "For further help see:" | 336 | else: |
301 | echo "$YOCTO_PARAVIRT_KVM_WIKI"; | 337 | raise Exception("Can't handle two unknown args: %s %s" % (unknown_arg, arg)) |
302 | exit 1; | 338 | # Check to make sure it is a valid machine |
303 | fi | 339 | if unknown_arg: |
304 | 340 | if self.get('MACHINE') == unknown_arg: | |
305 | if [ -w /dev/vhost-net -a -r /dev/vhost-net ]; then | 341 | return |
306 | VHOST_ACTIVE="yes" | 342 | if not self.get('DEPLOY_DIR_IMAGE'): |
307 | else | 343 | # Trying to get DEPLOY_DIR_IMAGE from env. |
308 | echo "You have no rights on /dev/vhost-net." | 344 | p = os.getenv('DEPLOY_DIR_IMAGE') |
309 | echo "Please change the ownership of this file as described at:" | 345 | if p and self.is_deploy_dir_image(p): |
310 | echo "$YOCTO_KVM_WIKI"; | 346 | machine = os.path.basename(p) |
311 | exit 1; | 347 | if unknown_arg == machine: |
312 | fi | 348 | self.set_machine_deploy_dir(machine, p) |
313 | fi | 349 | return |
314 | fi | 350 | else: |
315 | 351 | logger.info('DEPLOY_DIR_IMAGE: %s' % p) | |
316 | machine2=`echo $MACHINE | tr 'a-z' 'A-Z' | sed 's/-/_/'` | 352 | self.set("DEPLOY_DIR_IMAGE", p) |
317 | # MACHINE is now set for all cases | 353 | self.check_arg_machine(unknown_arg) |
318 | 354 | ||
319 | # Defaults used when these vars need to be inferred | 355 | def check_kvm(self): |
320 | QEMUX86_DEFAULT_KERNEL=bzImage-qemux86.bin | 356 | """Check kvm and kvm-host""" |
321 | QEMUX86_DEFAULT_FSTYPE=ext4 | 357 | if not (self.kvm_enabled or self.vhost_enabled): |
322 | 358 | self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU')) | |
323 | QEMUX86_64_DEFAULT_KERNEL=bzImage-qemux86-64.bin | 359 | return |
324 | QEMUX86_64_DEFAULT_FSTYPE=ext4 | 360 | |
325 | 361 | if not self.get('QB_CPU_KVM'): | |
326 | QEMUARM_DEFAULT_KERNEL=zImage-qemuarm.bin | 362 | raise Exception("QB_CPU_KVM is NULL, this board doesn't support kvm") |
327 | QEMUARM_DEFAULT_FSTYPE=ext4 | 363 | |
328 | 364 | self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM')) | |
329 | QEMUARM64_DEFAULT_KERNEL=Image-qemuarm64.bin | 365 | yocto_kvm_wiki = "https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu" |
330 | QEMUARM64_DEFAULT_FSTYPE=ext4 | 366 | yocto_paravirt_kvm_wiki = "https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM" |
331 | 367 | dev_kvm = '/dev/kvm' | |
332 | QEMUMIPS_DEFAULT_KERNEL=vmlinux-qemumips.bin | 368 | dev_vhost = '/dev/vhost-net' |
333 | QEMUMIPS_DEFAULT_FSTYPE=ext4 | 369 | with open('/proc/cpuinfo', 'r') as f: |
334 | 370 | kvm_cap = re.search('vmx|svm', "".join(f.readlines())) | |
335 | QEMUMIPSEL_DEFAULT_KERNEL=vmlinux-qemumipsel.bin | 371 | if not kvm_cap: |
336 | QEMUMIPSEL_DEFAULT_FSTYPE=ext4 | 372 | logger.error("You are trying to enable KVM on a cpu without VT support.") |
337 | 373 | logger.error("Remove kvm from the command-line, or refer:") | |
338 | QEMUMIPS64_DEFAULT_KERNEL=vmlinux-qemumips64.bin | 374 | raise Exception(yocto_kvm_wiki) |
339 | QEMUMIPS64_DEFAULT_FSTYPE=ext4 | 375 | |
340 | 376 | if not os.path.exists(dev_kvm): | |
341 | QEMUSH4_DEFAULT_KERNEL=vmlinux-qemumips.bin | 377 | logger.error("Missing KVM device. Have you inserted kvm modules?") |
342 | QEMUSH4_DEFAULT_FSTYPE=ext4 | 378 | logger.error("For further help see:") |
343 | 379 | raise Exception(yocto_kvm_wiki) | |
344 | QEMUPPC_DEFAULT_KERNEL=vmlinux-qemuppc.bin | 380 | |
345 | QEMUPPC_DEFAULT_FSTYPE=ext4 | 381 | if os.access(dev_kvm, os.W_OK|os.R_OK): |
346 | 382 | self.qemu_opt_script += ' -enable-kvm' | |
347 | QEMUMICROBLAZE_DEFAULT_KERNEL=linux.bin.ub | 383 | else: |
348 | QEMUMICROBLAZE_DEFAULT_FSTYPE=cpio | 384 | logger.error("You have no read or write permission on /dev/kvm.") |
349 | 385 | logger.error("Please change the ownership of this file as described at:") | |
350 | QEMUZYNQ_DEFAULT_KERNEL=uImage | 386 | raise Exception(yocto_kvm_wiki) |
351 | QEMUZYNQ_DEFAULT_FSTYPE=cpio | 387 | |
352 | 388 | if self.vhost_enabled: | |
353 | QEMUZYNQMP_DEFAULT_KERNEL=Image | 389 | if not os.path.exists(dev_vhost): |
354 | QEMUZYNQMP_DEFAULT_FSTYPE=cpio | 390 | logger.error("Missing virtio net device. Have you inserted vhost-net module?") |
355 | 391 | logger.error("For further help see:") | |
356 | setup_path_vars() { | 392 | raise Exception(yocto_paravirt_kvm_wiki) |
357 | if [ -z "$OE_TMPDIR" ] ; then | 393 | |
358 | PATHS_REQUIRED=true | 394 | if not os.access(dev_kvm, os.W_OK|os.R_OK): |
359 | elif [ "$1" = "1" -a -z "$DEPLOY_DIR_IMAGE" ] ; then | 395 | logger.error("You have no read or write permission on /dev/vhost-net.") |
360 | PATHS_REQUIRED=true | 396 | logger.error("Please change the ownership of this file as described at:") |
361 | else | 397 | raise Exception(yocto_kvm_wiki) |
362 | PATHS_REQUIRED=false | 398 | |
363 | fi | 399 | def check_fstype(self): |
364 | 400 | """Check and setup FSTYPE""" | |
365 | if [ "$PATHS_REQUIRED" = "true" ]; then | 401 | if not self.fstype: |
366 | # Try to get the variable values from bitbake | 402 | fstype = self.get('QB_DEFAULT_FSTYPE') |
367 | type -P bitbake &>/dev/null || { | 403 | if fstype: |
368 | echo "In order for this script to dynamically infer paths"; | 404 | self.fstype = fstype |
369 | echo "to kernels or filesystem images, you either need"; | 405 | else: |
370 | echo "bitbake in your PATH or to source oe-init-build-env"; | 406 | raise Exception("FSTYPE is NULL!") |
371 | echo "before running this script" >&2; | 407 | |
372 | exit 1; } | 408 | def check_rootfs(self): |
373 | 409 | """Check and set rootfs""" | |
374 | # We have bitbake in PATH, get the variable values from bitbake | 410 | |
375 | BITBAKE_ENV_TMPFILE=`mktemp --tmpdir runqemu.XXXXXXXXXX` | 411 | if self.fstype == 'nfs': |
376 | if [ "$?" != "0" ] ; then | 412 | return |
377 | echo "Error: mktemp failed for bitbake environment output" | 413 | |
378 | exit 1 | 414 | if self.rootfs and not os.path.exists(self.rootfs): |
379 | fi | 415 | # Lazy rootfs |
380 | 416 | self.rootfs = "%s/%s-%s.%s" % (self.get('DEPLOY_DIR_IMAGE'), | |
381 | MACHINE=$MACHINE bitbake -e > $BITBAKE_ENV_TMPFILE | 417 | self.rootfs, self.get('MACHINE'), |
382 | if [ -z "$OE_TMPDIR" ] ; then | 418 | self.fstype) |
383 | OE_TMPDIR=`sed -n 's/^TMPDIR=\"\(.*\)\"/\1/p' $BITBAKE_ENV_TMPFILE` | 419 | elif not self.rootfs: |
384 | fi | 420 | cmd = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype) |
385 | if [ -z "$DEPLOY_DIR_IMAGE" ] ; then | 421 | all_files = glob.glob(cmd) |
386 | DEPLOY_DIR_IMAGE=`sed -n 's/^DEPLOY_DIR_IMAGE=\"\(.*\)\"/\1/p' $BITBAKE_ENV_TMPFILE` | 422 | if all_files: |
387 | fi | 423 | self.rootfs = all_files[0] |
388 | if [ -z "$QEMU_DTB" ] ; then | 424 | else: |
389 | QEMU_DTB=`sed -n 's/^QEMU_DTB=\"\(.*\)\"/\1/p' $BITBAKE_ENV_TMPFILE` | 425 | raise Exception("Failed to find rootfs: %s" % cmd) |
390 | fi | 426 | |
391 | if [ -z "$OE_TMPDIR" ]; then | 427 | if not os.path.exists(self.rootfs): |
392 | # Check for errors from bitbake that the user needs to know about | 428 | raise Exception("Can't find rootfs: %s" % self.rootfs) |
393 | BITBAKE_OUTPUT=`cat $BITBAKE_ENV_TMPFILE | wc -l` | 429 | |
394 | if [ "$BITBAKE_OUTPUT" -eq "0" ]; then | 430 | def check_kernel(self): |
395 | echo "Error: this script needs to be run from your build directory, or you need" | 431 | """Check and set kernel, dtb""" |
396 | echo "to explicitly set OE_TMPDIR and DEPLOY_DIR_IMAGE in your environment" | 432 | # The vm image doesn't need a kernel |
397 | else | 433 | if self.fstype in self.vmtypes: |
398 | echo "There was an error running bitbake to determine TMPDIR" | 434 | return |
399 | echo "Here is the output from 'bitbake -e':" | 435 | kernel = self.kernel |
400 | cat $BITBAKE_ENV_TMPFILE | 436 | if not kernel: |
401 | fi | 437 | kernel = "%s/%s" % (self.get('DEPLOY_DIR_IMAGE'), self.get('QB_DEFAULT_KERNEL')) |
402 | rm $BITBAKE_ENV_TMPFILE | 438 | |
403 | exit 1 | 439 | if os.path.exists(kernel): |
404 | fi | 440 | self.kernel = kernel |
405 | rm $BITBAKE_ENV_TMPFILE | 441 | else: |
406 | fi | 442 | raise Exception("KERNEL %s not found" % kernel) |
407 | } | 443 | |
408 | 444 | dtb = self.get('QB_DTB') | |
409 | setup_sysroot() { | 445 | if dtb: |
410 | # Toolchain installs set up $OECORE_NATIVE_SYSROOT in their | 446 | dtb = "%s/%s" % (self.get('DEPLOY_DIR_IMAGE'), dtb) |
411 | # environment script. If that variable isn't set, we're | 447 | if os.path.exists(dtb): |
412 | # either in an in-tree build scenario or the environment | 448 | self.set('QB_DTB', '-dtb %s' % dtb) |
413 | # script wasn't source'd. | 449 | else: |
414 | if [ -z "$OECORE_NATIVE_SYSROOT" ]; then | 450 | raise Exception("DTB %s not found" % dtb) |
415 | setup_path_vars | 451 | |
416 | BUILD_ARCH=`uname -m` | 452 | |
417 | BUILD_OS=`uname | tr '[A-Z]' '[a-z]'` | 453 | def check_biosdir(self): |
418 | BUILD_SYS="$BUILD_ARCH-$BUILD_OS" | 454 | """Check custombiosdir""" |
419 | 455 | if not self.custombiosdir: | |
420 | OECORE_NATIVE_SYSROOT=$OE_TMPDIR/sysroots/$BUILD_SYS | 456 | return |
421 | fi | 457 | |
422 | 458 | biosdir = "" | |
423 | # Some recipes store the BIOS under $OE_TMPDIR/sysroots/$MACHINE, | 459 | biosdir_native = "%s/%s" % (self.get('STAGING_DIR_NATIVE'), self.custombiosdir) |
424 | # now defined as OECORE_MACHINE_SYSROOT. The latter is used when searching | 460 | biosdir_host = "%s/%s" % (self.get('STAGING_DIR_HOST'), self.custombiosdir) |
425 | # BIOS, VGA BIOS and keymaps. | 461 | for i in (self.custombiosdir, biosdir_native, biosdir_host): |
426 | if [ -z "$OECORE_MACHINE_SYSROOT" ]; then | 462 | if os.path.isdir(i): |
427 | OECORE_MACHINE_SYSROOT=$OE_TMPDIR/sysroots/$MACHINE | 463 | biosdir = i |
428 | fi | 464 | break |
429 | } | 465 | |
430 | 466 | if biosdir: | |
431 | # Locate a rootfs image to boot which matches our expected | 467 | logger.info("Assuming biosdir is: %s" % biosdir) |
432 | # machine and fstype. | 468 | self.qemu_opt_script += ' -L %s' % biosdir |
433 | findimage() { | 469 | else: |
434 | where=$1 | 470 | logger.error("Custom BIOS directory not found. Tried: %s, %s, and %s" % (self.custombiosdir, biosdir_native, biosdir_host)) |
435 | machine=$2 | 471 | raise Exception("Invalid custombiosdir: %s" % self.custombiosdir) |
436 | extension=$3 | 472 | |
437 | 473 | def check_mem(self): | |
438 | # Sort rootfs candidates by modification time - the most | 474 | s = re.search('-m +([0-9]+)', self.qemu_opt_script) |
439 | # recently created one is the one we most likely want to boot. | 475 | if s: |
440 | filename=`ls -t1 $where/*-image*$machine.$extension 2>/dev/null | head -n1` | 476 | self.set('QB_MEM', '-m %s' % s.group(1)) |
441 | if [ "x$filename" != "x" ]; then | 477 | elif not self.get('QB_MEM'): |
442 | ROOTFS=$filename | 478 | logger.info('QB_MEM is not set, use 512M by default') |
443 | return | 479 | self.set('QB_MEM', '-m 512') |
444 | fi | 480 | |
445 | 481 | self.kernel_cmdline_script += ' mem=%s' % self.get('QB_MEM').replace('-m','').strip() + 'M' | |
446 | echo "Couldn't find a $machine rootfs image in $where." | 482 | self.qemu_opt_script += ' %s' % self.get('QB_MEM') |
447 | exit 1 | 483 | |
448 | } | 484 | def check_tcpserial(self): |
449 | 485 | if self.tcpserial_portnum: | |
450 | if [ -e "$ROOTFS" -a -z "$FSTYPE" ]; then | 486 | if self.get('QB_TCPSERIAL_OPT'): |
451 | # Extract the filename extension | 487 | self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', self.tcpserial_portnum) |
452 | EXT=`echo $ROOTFS | awk -F . '{ print \$NF }'` | 488 | else: |
453 | if [ "x$EXT" = "xext2" -o "x$EXT" = "xext3" -o \ | 489 | self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % self.tcpserial_portnum |
454 | "x$EXT" = "xjffs2" -o "x$EXT" = "xbtrfs" -o \ | 490 | |
455 | "x$EXT" = "xext4" ]; then | 491 | def check_and_set(self): |
456 | FSTYPE=$EXT | 492 | """Check configs sanity and set when needed""" |
457 | else | 493 | check_tun() |
458 | echo "Note: Unable to determine filesystem extension for $ROOTFS" | 494 | # Check audio |
459 | echo "We will use the default FSTYPE for $MACHINE" | 495 | if self.audio_enabled: |
460 | # ...which is done further below... | 496 | if not self.get('QB_AUDIO_DRV'): |
461 | fi | 497 | raise Exception("QB_AUDIO_DRV is NULL, this board doesn't support audio") |
462 | fi | 498 | if not self.get('QB_AUDIO_OPT'): |
463 | 499 | logger.warn('QB_AUDIO_OPT is NULL, you may need define it to make audio work') | |
464 | if [ -z "$KERNEL" -a "$IS_VM" = "false" ]; then \ | 500 | else: |
465 | setup_path_vars 1 | 501 | self.qemu_opt_script += ' %s' % self.get('QB_AUDIO_OPT') |
466 | eval kernel_file=\$${machine2}_DEFAULT_KERNEL | 502 | os.putenv('QEMU_AUDIO_DRV', self.get('QB_AUDIO_DRV')) |
467 | KERNEL=$DEPLOY_DIR_IMAGE/$kernel_file | 503 | else: |
468 | 504 | os.putenv('QEMU_AUDIO_DRV', 'none') | |
469 | if [ -z "$KERNEL" ]; then | 505 | |
470 | error "Unable to determine default kernel for MACHINE [$MACHINE]" | 506 | self.check_kvm() |
471 | fi | 507 | self.check_fstype() |
472 | fi | 508 | self.check_rootfs() |
473 | # KERNEL is now set for all cases | 509 | self.check_kernel() |
474 | 510 | self.check_biosdir() | |
475 | if [ -z "$FSTYPE" ]; then | 511 | self.check_mem() |
476 | eval FSTYPE=\$${machine2}_DEFAULT_FSTYPE | 512 | self.check_tcpserial() |
477 | 513 | ||
478 | if [ -z "$FSTYPE" ]; then | 514 | def read_qemuboot(self): |
479 | error "Unable to determine default fstype for MACHINE [$MACHINE]" | 515 | if not self.qemuboot: |
480 | fi | 516 | if self.get('DEPLOY_DIR_IMAGE'): |
481 | fi | 517 | deploy_dir_image = self.get('DEPLOY_DIR_IMAGE') |
482 | 518 | elif os.getenv('DEPLOY_DIR_IMAGE'): | |
483 | # FSTYPE is now set for all cases | 519 | deploy_dir_image = os.getenv('DEPLOY_DIR_IMAGE') |
484 | 520 | else: | |
485 | # Handle cases where a ROOTFS type is given instead of a filename, e.g. | 521 | raise Exception("DEPLOY_DIR_IMAGE is NULL!") |
486 | # core-image-sato | 522 | |
487 | if [ "$LAZY_ROOTFS" = "true" ]; then | 523 | if self.rootfs and not os.path.exists(self.rootfs): |
488 | setup_path_vars 1 | 524 | # Lazy rootfs |
489 | echo "Assuming $ROOTFS really means $DEPLOY_DIR_IMAGE/$ROOTFS-$MACHINE.$FSTYPE" | 525 | machine = self.get('MACHINE') |
490 | if [ "$IS_VM" = "true" ]; then | 526 | if not machine: |
491 | VM=$DEPLOY_DIR_IMAGE/$ROOTFS-$MACHINE.$FSTYPE | 527 | machine = os.path.basename(deploy_dir_image) |
492 | else | 528 | self.qemuboot = "%s/%s-%s.qemuboot.conf" % (deploy_dir_image, |
493 | ROOTFS=$DEPLOY_DIR_IMAGE/$ROOTFS-$MACHINE.$FSTYPE | 529 | self.rootfs, machine) |
494 | fi | 530 | else: |
495 | fi | 531 | cmd = 'ls -t %s/*.qemuboot.conf' % deploy_dir_image |
496 | 532 | logger.info('Running %s...' % cmd) | |
497 | if [ -z "$ROOTFS" ]; then | 533 | qbs = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8') |
498 | setup_path_vars 1 | 534 | if qbs: |
499 | T=$DEPLOY_DIR_IMAGE | 535 | self.qemuboot = qbs.split()[0] |
500 | eval rootfs_list=\$${machine2}_DEFAULT_ROOTFS | 536 | |
501 | findimage $T $MACHINE $FSTYPE | 537 | if not os.path.exists(self.qemuboot): |
502 | 538 | raise Exception("Failed to find <image>.qemuboot.conf!") | |
503 | if [ -z "$ROOTFS" ]; then | 539 | |
504 | error "Unable to determine default rootfs for MACHINE [$MACHINE]" | 540 | logger.info('CONFFILE: %s' % self.qemuboot) |
505 | elif [ "$IS_VM" = "true" ]; then | 541 | |
506 | VM=$ROOTFS | 542 | cf = configparser.ConfigParser() |
507 | fi | 543 | cf.read(self.qemuboot) |
508 | fi | 544 | for k, v in cf.items('config_bsp'): |
509 | # ROOTFS is now set for all cases, now expand it to be an absolute path, it should exist at this point | 545 | k_upper = k.upper() |
510 | 546 | self.set(k_upper, v) | |
511 | ROOTFS=`readlink -f $ROOTFS` | 547 | |
512 | 548 | def print_config(self): | |
513 | echo "" | 549 | logger.info('Continuing with the following parameters:\n') |
514 | echo "Continuing with the following parameters:" | 550 | if not self.fstype in self.vmtypes: |
515 | if [ "$IS_VM" = "false" ]; then | 551 | print('KERNEL: [%s]' % self.kernel) |
516 | echo "KERNEL: [$KERNEL]" | 552 | print('MACHINE: [%s]' % self.get('MACHINE')) |
517 | echo "ROOTFS: [$ROOTFS]" | 553 | print('FSTYPE: [%s]' % self.fstype) |
518 | else | 554 | if self.fstype == 'nfs': |
519 | echo "VM: [$VM]" | 555 | print('NFS_DIR: [%s]' % self.nfs_dir) |
520 | fi | 556 | else: |
521 | echo "FSTYPE: [$FSTYPE]" | 557 | print('ROOTFS: [%s]' % self.rootfs) |
522 | 558 | print('CONFFILE: [%s]' % self.qemuboot) | |
523 | setup_sysroot | 559 | print('') |
524 | # OECORE_NATIVE_SYSROOT and OECORE_MACHINE_SYSROOT are now set for all cases | 560 | |
525 | 561 | def setup_nfs(self): | |
526 | INTERNAL_SCRIPT="$0-internal" | 562 | if not self.nfs_server: |
527 | if [ ! -f "$INTERNAL_SCRIPT" -o ! -r "$INTERNAL_SCRIPT" ]; then | 563 | if self.slirp_enabled: |
528 | INTERNAL_SCRIPT=`which runqemu-internal` | 564 | self.nfs_server = '10.0.2.2' |
529 | fi | 565 | else: |
530 | 566 | self.nfs_server = '192.168.7.1' | |
531 | # Specify directory for BIOS, VGA BIOS and keymaps | 567 | |
532 | if [ ! -z "$CUSTOMBIOSDIR" ]; then | 568 | nfs_instance = int(self.nfs_instance) |
533 | if [ -d "$OECORE_NATIVE_SYSROOT/$CUSTOMBIOSDIR" ]; then | 569 | |
534 | echo "Assuming biosdir is $OECORE_NATIVE_SYSROOT/$CUSTOMBIOSDIR" | 570 | mountd_rpcport = 21111 + nfs_instance |
535 | SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -L $OECORE_NATIVE_SYSROOT/$CUSTOMBIOSDIR" | 571 | nfsd_rpcport = 11111 + nfs_instance |
536 | elif [ -d "$OECORE_MACHINE_SYSROOT/$CUSTOMBIOSDIR" ]; then | 572 | nfsd_port = 3049 + 2 * nfs_instance |
537 | echo "Assuming biosdir is $OECORE_MACHINE_SYSROOT/$CUSTOMBIOSDIR" | 573 | mountd_port = 3048 + 2 * nfs_instance |
538 | SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -L $OECORE_MACHINE_SYSROOT/$CUSTOMBIOSDIR" | 574 | unfs_opts="nfsvers=3,port=%s,mountprog=%s,nfsprog=%s,udp,mountport=%s" % (nfsd_port, mountd_rpcport, nfsd_rpcport, mountd_port) |
539 | else | 575 | self.unfs_opts = unfs_opts |
540 | if [ ! -d "$CUSTOMBIOSDIR" ]; then | 576 | |
541 | echo "Custom BIOS directory not found. Tried: $CUSTOMBIOSDIR" | 577 | p = '%s/.runqemu-sdk/pseudo' % os.getenv('HOME') |
542 | echo "and $OECORE_NATIVE_SYSROOT/$CUSTOMBIOSDIR" | 578 | os.putenv('PSEUDO_LOCALSTATEDIR', p) |
543 | echo "and $OECORE_MACHINE_SYSROOT/$CUSTOMBIOSDIR" | 579 | |
544 | exit 1; | 580 | # Extract .tar.bz2 or .tar.bz if no self.nfs_dir |
545 | fi | 581 | if not self.nfs_dir: |
546 | echo "Assuming biosdir is $CUSTOMBIOSDIR" | 582 | src_prefix = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME')) |
547 | SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -L $CUSTOMBIOSDIR" | 583 | dest = "%s-nfsroot" % src_prefix |
548 | fi | 584 | if os.path.exists('%s.pseudo_state' % dest): |
549 | fi | 585 | logger.info('Use %s as NFS_DIR' % dest) |
550 | 586 | self.nfs_dir = dest | |
551 | . $INTERNAL_SCRIPT | 587 | else: |
552 | exit $? | 588 | src = "" |
589 | src1 = '%s.tar.bz2' % src_prefix | ||
590 | src2 = '%s.tar.gz' % src_prefix | ||
591 | if os.path.exists(src1): | ||
592 | src = src1 | ||
593 | elif os.path.exists(src2): | ||
594 | src = src2 | ||
595 | if not src: | ||
596 | raise Exception("No NFS_DIR is set but can't find %s or %s to extract" % (src1, src2)) | ||
597 | logger.info('NFS_DIR not found, extracting %s to %s' % (src, dest)) | ||
598 | cmd = 'runqemu-extract-sdk %s %s' % (src, dest) | ||
599 | logger.info('Running %s...' % cmd) | ||
600 | if subprocess.call(cmd, shell=True) != 0: | ||
601 | raise Exception('Failed to run %s' % cmd) | ||
602 | self.clean_nfs_dir = True | ||
603 | self.nfs_dir = dest | ||
604 | |||
605 | # Start the userspace NFS server | ||
606 | cmd = 'runqemu-export-rootfs restart %s' % self.nfs_dir | ||
607 | logger.info('Running %s...' % cmd) | ||
608 | if subprocess.call(cmd, shell=True) != 0: | ||
609 | raise Exception('Failed to run %s' % cmd) | ||
610 | |||
611 | self.nfs_running = True | ||
612 | |||
613 | |||
614 | def setup_slirp(self): | ||
615 | if self.fstype == 'nfs': | ||
616 | self.setup_nfs() | ||
617 | self.kernel_cmdline_script += ' ip=dhcp' | ||
618 | self.set('NETWORK_CMD', self.get('QB_SLIRP_OPT')) | ||
619 | |||
620 | def setup_tap(self): | ||
621 | """Setup tap""" | ||
622 | |||
623 | # This file is created when runqemu-gen-tapdevs creates a bank of tap | ||
624 | # devices, indicating that the user should not bring up new ones using | ||
625 | # sudo. | ||
626 | nosudo_flag = '/etc/runqemu-nosudo' | ||
627 | self.qemuifup = shutil.which('runqemu-ifup') | ||
628 | self.qemuifdown = shutil.which('runqemu-ifdown') | ||
629 | ip = shutil.which('ip') | ||
630 | lockdir = "/tmp/qemu-tap-locks" | ||
631 | |||
632 | if not (self.qemuifup and self.qemuifdown and ip): | ||
633 | raise Exception("runqemu-ifup, runqemu-ifdown or ip not found") | ||
634 | |||
635 | if not os.path.exists(lockdir): | ||
636 | os.mkdir(lockdir) | ||
637 | |||
638 | cmd = '%s link' % ip | ||
639 | logger.info('Running %s...' % cmd) | ||
640 | ip_link = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8') | ||
641 | # Matches line like: 6: tap0: <foo> | ||
642 | possibles = re.findall('^[1-9]+: +(tap[0-9]+): <.*', ip_link, re.M) | ||
643 | tap = "" | ||
644 | for p in possibles: | ||
645 | lockfile = os.path.join(lockdir, p) | ||
646 | if os.path.exists('%s.skip' % lockfile): | ||
647 | logger.info('Found %s.skip, skipping %s' % (lockfile, p)) | ||
648 | continue | ||
649 | self.lock = lockfile + '.lock' | ||
650 | if self.acquire_lock(): | ||
651 | tap = p | ||
652 | logger.info("Using preconfigured tap device %s" % tap) | ||
653 | logger.info("If this is not intended, touch %s.skip to make runqemu skip %s." %(lockfile, tap)) | ||
654 | break | ||
655 | |||
656 | if not tap: | ||
657 | if os.path.exists(nosudo_flag): | ||
658 | logger.error("Error: There are no available tap devices to use for networking,") | ||
659 | logger.error("and I see %s exists, so I am not going to try creating" % nosudo_flag) | ||
660 | raise Exception("a new one with sudo.") | ||
661 | |||
662 | gid = os.getgid() | ||
663 | uid = os.getuid() | ||
664 | logger.info("Setting up tap interface under sudo") | ||
665 | cmd = 'sudo %s %s %s %s' % (self.qemuifup, uid, gid, self.get('STAGING_DIR_NATIVE')) | ||
666 | tap = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8').rstrip('\n') | ||
667 | lockfile = os.path.join(lockdir, tap) | ||
668 | self.lock = lockfile + '.lock' | ||
669 | self.acquire_lock() | ||
670 | self.cleantap = True | ||
671 | logger.info('Created tap: %s' % tap) | ||
672 | |||
673 | self.tap = tap | ||
674 | n0 = tap[3:] | ||
675 | n1 = int(n0) * 2 + 1 | ||
676 | n2 = n1 + 1 | ||
677 | self.nfs_instance = n0 | ||
678 | if self.fstype == 'nfs': | ||
679 | self.setup_nfs() | ||
680 | self.kernel_cmdline_script += " ip=192.168.7.%s::192.168.7.%s:255.255.255.0" % (n2, n1) | ||
681 | qb_tap_opt = self.get('QB_TAP_OPT') | ||
682 | if qb_tap_opt: | ||
683 | qemu_tap_opt = qb_tap_opt.replace('@TAP@', tap) | ||
684 | else: | ||
685 | qemu_tap_opt = "-net nic,model=virtio -net tap,vlan=0,ifname=%s,script=no,downscript=no" % self.tap | ||
686 | |||
687 | if self.vhost_enabled: | ||
688 | qemu_tap_opt += ',vhost=on' | ||
689 | |||
690 | self.set('NETWORK_CMD', qemu_tap_opt) | ||
691 | |||
692 | def setup_network(self): | ||
693 | cmd = "stty -g" | ||
694 | self.saved_stty = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8') | ||
695 | if self.slirp_enabled: | ||
696 | self.setup_slirp() | ||
697 | else: | ||
698 | self.setup_tap() | ||
699 | |||
700 | qb_rootfs_opt = self.get('QB_ROOTFS_OPT') | ||
701 | if qb_rootfs_opt: | ||
702 | self.rootfs_options = qb_rootfs_opt.replace('@ROOTFS@', self.rootfs) | ||
703 | else: | ||
704 | self.rootfs_options = '-drive file=%s,if=virtio,format=raw' % self.rootfs | ||
705 | |||
706 | if self.fstype in ('cpio.gz', 'cpio'): | ||
707 | self.set('NETWORK_CMD', '') | ||
708 | self.kernel_cmdline = 'root=/dev/ram0 rw debugshell' | ||
709 | self.rootfs_options = '-initrd %s' % self.rootfs | ||
710 | else: | ||
711 | if self.fstype in self.vmtypes: | ||
712 | if self.fstype == 'iso': | ||
713 | vm_drive = '-cdrom %s' % self.rootfs | ||
714 | else: | ||
715 | cmd1 = "grep -q 'root=/dev/sd' %s" % self.rootfs | ||
716 | cmd2 = "grep -q 'root=/dev/hd' %s" % self.rootfs | ||
717 | if subprocess.call(cmd1, shell=True) == 0: | ||
718 | logger.info('Using scsi drive') | ||
719 | vm_drive = '-drive if=none,id=hd,file=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' % self.rootfs | ||
720 | elif subprocess.call(cmd2, shell=True) == 0: | ||
721 | logger.info('Using scsi drive') | ||
722 | vm_drive = self.rootfs | ||
723 | else: | ||
724 | logger.warn("Can't detect drive type %s" % self.rootfs) | ||
725 | logger.warn('Tring to use virtio block drive') | ||
726 | vm_drive = '-drive if=virtio,file=%s' % self.rootfs | ||
727 | self.rootfs_options = '%s -no-reboot' % vm_drive | ||
728 | self.kernel_cmdline = 'root=%s rw highres=off' % (self.get('QB_KERNEL_ROOT')) | ||
729 | |||
730 | if self.fstype == 'nfs': | ||
731 | self.rootfs_options = '' | ||
732 | k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, self.nfs_dir, self.unfs_opts) | ||
733 | self.kernel_cmdline = 'root=%s rw highres=off' % k_root | ||
734 | |||
735 | self.set('ROOTFS_OPTIONS', self.rootfs_options) | ||
736 | |||
737 | def setup_final(self): | ||
738 | qemu_system = self.get('QB_SYSTEM_NAME') | ||
739 | if not qemu_system: | ||
740 | raise Exception("Failed to boot, QB_SYSTEM_NAME is NULL!") | ||
741 | |||
742 | qemu_bin = '%s/%s' % (self.get('STAGING_BINDIR_NATIVE'), qemu_system) | ||
743 | if not os.access(qemu_bin, os.X_OK): | ||
744 | raise Exception("No QEMU binary '%s' could be found" % qemu_bin) | ||
745 | |||
746 | check_libgl(qemu_bin) | ||
747 | |||
748 | self.qemu_opt = "%s %s %s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.qemu_opt_script, self.get('ROOTFS_OPTIONS'), self.get('QB_DTB'), self.get('QB_OPT_APPEND')) | ||
749 | |||
750 | if self.serialstdio: | ||
751 | logger.info("Interrupt character is '^]'") | ||
752 | cmd = "stty intr ^]" | ||
753 | subprocess.call(cmd, shell=True) | ||
754 | |||
755 | first_serial = "" | ||
756 | if not re.search("-nographic", self.qemu_opt): | ||
757 | first_serial = "-serial mon:vc" | ||
758 | # We always want a ttyS1. Since qemu by default adds a serial | ||
759 | # port when nodefaults is not specified, it seems that all that | ||
760 | # would be needed is to make sure a "-serial" is there. However, | ||
761 | # it appears that when "-serial" is specified, it ignores the | ||
762 | # default serial port that is normally added. So here we make | ||
763 | # sure to add two -serial if there are none. And only one if | ||
764 | # there is one -serial already. | ||
765 | serial_num = len(re.findall("-serial", self.qemu_opt)) | ||
766 | if serial_num == 0: | ||
767 | self.qemu_opt += " %s %s" % (first_serial, self.get("QB_SERIAL_OPT")) | ||
768 | elif serial_num == 1: | ||
769 | self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT") | ||
770 | |||
771 | def start_qemu(self): | ||
772 | if self.kernel: | ||
773 | kernel_opts = "-kernel %s -append '%s %s %s'" % (self.kernel, self.kernel_cmdline, self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND')) | ||
774 | else: | ||
775 | kernel_opts = "" | ||
776 | cmd = "%s %s" % (self.qemu_opt, kernel_opts) | ||
777 | logger.info('Running %s' % cmd) | ||
778 | if subprocess.call(cmd, shell=True) != 0: | ||
779 | raise Exception('Failed to run %s' % cmd) | ||
780 | |||
781 | def cleanup(self): | ||
782 | if self.cleantap: | ||
783 | cmd = 'sudo %s %s %s' % (self.qemuifdown, self.tap, self.get('STAGING_DIR_NATIVE')) | ||
784 | logger.info('Running %s' % cmd) | ||
785 | subprocess.call(cmd, shell=True) | ||
786 | if self.lock_descriptor: | ||
787 | logger.info("Releasing lockfile for tap device '%s'" % self.tap) | ||
788 | self.release_lock() | ||
789 | |||
790 | if self.nfs_running: | ||
791 | logger.info("Shutting down the userspace NFS server...") | ||
792 | cmd = "runqemu-export-rootfs stop %s" % self.nfs_dir | ||
793 | logger.info('Running %s' % cmd) | ||
794 | subprocess.call(cmd, shell=True) | ||
795 | |||
796 | if self.saved_stty: | ||
797 | cmd = "stty %s" % self.saved_stty | ||
798 | subprocess.call(cmd, shell=True) | ||
799 | |||
800 | if self.clean_nfs_dir: | ||
801 | logger.info('Removing %s' % self.nfs_dir) | ||
802 | shutil.rmtree(self.nfs_dir) | ||
803 | shutil.rmtree('%s.pseudo_state' % self.nfs_dir) | ||
804 | |||
805 | def main(): | ||
806 | if len(sys.argv) == 1 or "help" in sys.argv: | ||
807 | print_usage() | ||
808 | return 0 | ||
809 | config = BaseConfig() | ||
810 | try: | ||
811 | config.check_args() | ||
812 | except Exception as esc: | ||
813 | logger.error(esc) | ||
814 | logger.error("Try 'runqemu help' on how to use it") | ||
815 | return 1 | ||
816 | config.read_qemuboot() | ||
817 | config.check_and_set() | ||
818 | config.print_config() | ||
819 | try: | ||
820 | config.setup_network() | ||
821 | config.setup_final() | ||
822 | config.start_qemu() | ||
823 | finally: | ||
824 | config.cleanup() | ||
825 | return 0 | ||
826 | |||
827 | if __name__ == "__main__": | ||
828 | try: | ||
829 | ret = main() | ||
830 | except Exception as esc: | ||
831 | ret = 1 | ||
832 | import traceback | ||
833 | traceback.print_exc() | ||
834 | sys.exit(ret) | ||