Diffstat (limited to 'scripts/lib/mic/utils')
-rw-r--r--  scripts/lib/mic/utils/BmapCreate.py            298
-rw-r--r--  scripts/lib/mic/utils/Fiemap.py                252
-rw-r--r--  scripts/lib/mic/utils/__init__.py                0
-rw-r--r--  scripts/lib/mic/utils/cmdln.py                1586
-rw-r--r--  scripts/lib/mic/utils/errors.py                 71
-rw-r--r--  scripts/lib/mic/utils/fs_related.py           1060
-rw-r--r--  scripts/lib/mic/utils/gpt_parser.py            331
-rw-r--r--  scripts/lib/mic/utils/grabber.py                97
-rw-r--r--  scripts/lib/mic/utils/misc.py                 1065
-rw-r--r--  scripts/lib/mic/utils/oe/__init__.py            22
-rw-r--r--  scripts/lib/mic/utils/oe/misc.py               144
-rw-r--r--  scripts/lib/mic/utils/oe/package_manager.py    810
-rw-r--r--  scripts/lib/mic/utils/partitionedfs.py         782
-rw-r--r--  scripts/lib/mic/utils/proxy.py                 183
-rw-r--r--  scripts/lib/mic/utils/rpmmisc.py               600
-rw-r--r--  scripts/lib/mic/utils/runner.py                109
16 files changed, 7410 insertions, 0 deletions
diff --git a/scripts/lib/mic/utils/BmapCreate.py b/scripts/lib/mic/utils/BmapCreate.py
new file mode 100644
index 0000000000..65b19a5f46
--- /dev/null
+++ b/scripts/lib/mic/utils/BmapCreate.py
@@ -0,0 +1,298 @@
1""" This module implements the block map (bmap) creation functionality and
2provides the corresponding API in the form of the 'BmapCreate' class.
3
4The idea is that while image files may generally be very large (e.g., 4GiB),
5they may nevertheless contain only little real data, e.g., 512MiB. This data
6consists of files, directories, file-system meta-data, the partition table, etc. When
7copying the image to the target device, you do not have to copy all the 4GiB of
8data, you can copy only 512MiB of it, which is 4 times less, so copying should
9presumably be 4 times faster.
10
11The block map file is an XML file which contains a list of blocks which have to
12be copied to the target device. The other blocks are not used and there is no
13need to copy them. The XML file also contains some additional information like
14block size, image size, count of mapped blocks, etc. There are also many
15comments, so it is human-readable.
16
17The image has to be a sparse file. Generally, this means that when you generate
18this image file, you should start with a huge sparse file which contains a
19single hole spanning the entire file. Then you should partition it, write all
20the data (probably by means of loop-back mounting the image or parts of it),
21etc. The end result should be a sparse file where mapped areas represent useful
22parts of the image and holes represent useless parts of the image, which do not
23have to be copied when copying the image to the target device.
24
25This module uses the FIEMAP ioctl to detect holes. """
26
27# Disable the following pylint recommendations:
28# * Too many instance attributes - R0902
29# * Too few public methods - R0903
30# pylint: disable=R0902,R0903
31
32import hashlib
33from mic.utils.misc import human_size
34from mic.utils import Fiemap
35
36# The bmap format version we generate
37SUPPORTED_BMAP_VERSION = "1.3"
38
39_BMAP_START_TEMPLATE = \
40"""<?xml version="1.0" ?>
41<!-- This file contains the block map for an image file, which is basically
42 a list of useful (mapped) block numbers in the image file. In other words,
43 it lists only those blocks which contain data (boot sector, partition
44 table, file-system metadata, files, directories, extents, etc). These
45 blocks have to be copied to the target device. The other blocks do not
46 contain any useful data and do not have to be copied to the target
47 device.
48
49     The block map is an optimization which allows copying or flashing the image
50     to the target device quicker than copying or flashing the entire image. This is
51 because with bmap less data is copied: <MappedBlocksCount> blocks instead
52 of <BlocksCount> blocks.
53
54     Besides the machine-readable data, this file contains useful comments
55 which contain human-readable information like image size, percentage of
56 mapped data, etc.
57
58 The 'version' attribute is the block map file format version in the
59 'major.minor' format. The version major number is increased whenever an
60 incompatible block map format change is made. The minor number changes
61 in case of minor backward-compatible changes. -->
62
63<bmap version="%s">
64 <!-- Image size in bytes: %s -->
65 <ImageSize> %u </ImageSize>
66
67 <!-- Size of a block in bytes -->
68 <BlockSize> %u </BlockSize>
69
70 <!-- Count of blocks in the image file -->
71 <BlocksCount> %u </BlocksCount>
72
73"""
74
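# For illustration (not part of the original commit): with a hypothetical
# 4 GiB image and 4 KiB blocks, the rendered header would begin roughly as:
#
#   <bmap version="1.3">
#       <!-- Image size in bytes: 4.0 GiB -->
#       <ImageSize> 4294967296 </ImageSize>
#       ...
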
75class Error(Exception):
76 """ A class for exceptions generated by this module. We currently support
77    only one type of exception, and we basically throw a human-readable problem
78 description in case of errors. """
79 pass
80
81class BmapCreate:
82 """ This class implements the bmap creation functionality. To generate a
83 bmap for an image (which is supposedly a sparse file), you should first
84 create an instance of 'BmapCreate' and provide:
85
86 * full path or a file-like object of the image to create bmap for
87 * full path or a file object to use for writing the results to
88
89 Then you should invoke the 'generate()' method of this class. It will use
90 the FIEMAP ioctl to generate the bmap. """
91
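    # A minimal usage sketch (hypothetical file names, not part of this
    # commit), assuming 'image.raw' is a sparse image in the current
    # directory:
    #
    #   from mic.utils.BmapCreate import BmapCreate
    #   creator = BmapCreate("image.raw", "image.bmap")
    #   creator.generate()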
92 def _open_image_file(self):
93 """ Open the image file. """
94
95 try:
96 self._f_image = open(self._image_path, 'rb')
97 except IOError as err:
98 raise Error("cannot open image file '%s': %s" \
99 % (self._image_path, err))
100
101 self._f_image_needs_close = True
102
103 def _open_bmap_file(self):
104 """ Open the bmap file. """
105
106 try:
107 self._f_bmap = open(self._bmap_path, 'w+')
108 except IOError as err:
109 raise Error("cannot open bmap file '%s': %s" \
110 % (self._bmap_path, err))
111
112 self._f_bmap_needs_close = True
113
114 def __init__(self, image, bmap):
115 """ Initialize a class instance:
116 * image - full path or a file-like object of the image to create bmap
117 for
118 * bmap - full path or a file object to use for writing the resulting
119 bmap to """
120
121 self.image_size = None
122 self.image_size_human = None
123 self.block_size = None
124 self.blocks_cnt = None
125 self.mapped_cnt = None
126 self.mapped_size = None
127 self.mapped_size_human = None
128 self.mapped_percent = None
129
130 self._mapped_count_pos1 = None
131 self._mapped_count_pos2 = None
132 self._sha1_pos = None
133
134 self._f_image_needs_close = False
135 self._f_bmap_needs_close = False
136
137 if hasattr(image, "read"):
138 self._f_image = image
139 self._image_path = image.name
140 else:
141 self._image_path = image
142 self._open_image_file()
143
144 if hasattr(bmap, "read"):
145 self._f_bmap = bmap
146 self._bmap_path = bmap.name
147 else:
148 self._bmap_path = bmap
149 self._open_bmap_file()
150
151 self.fiemap = Fiemap.Fiemap(self._f_image)
152
153 self.image_size = self.fiemap.image_size
154 self.image_size_human = human_size(self.image_size)
155 if self.image_size == 0:
156 raise Error("cannot generate bmap for zero-sized image file '%s'" \
157 % self._image_path)
158
159 self.block_size = self.fiemap.block_size
160 self.blocks_cnt = self.fiemap.blocks_cnt
161
162 def _bmap_file_start(self):
163 """ A helper function which generates the starting contents of the
164 block map file: the header comment, image size, block size, etc. """
165
166 # We do not know the amount of mapped blocks at the moment, so just put
167 # whitespaces instead of real numbers. Assume the longest possible
168 # numbers.
169 mapped_count = ' ' * len(str(self.image_size))
170 mapped_size_human = ' ' * len(self.image_size_human)
171
172 xml = _BMAP_START_TEMPLATE \
173 % (SUPPORTED_BMAP_VERSION, self.image_size_human,
174 self.image_size, self.block_size, self.blocks_cnt)
175 xml += " <!-- Count of mapped blocks: "
176
177 self._f_bmap.write(xml)
178 self._mapped_count_pos1 = self._f_bmap.tell()
179
180 # Just put white-spaces instead of real information about mapped blocks
181 xml = "%s or %.1f -->\n" % (mapped_size_human, 100.0)
182 xml += " <MappedBlocksCount> "
183
184 self._f_bmap.write(xml)
185 self._mapped_count_pos2 = self._f_bmap.tell()
186
187 xml = "%s </MappedBlocksCount>\n\n" % mapped_count
188
189 # pylint: disable=C0301
190 xml += " <!-- The checksum of this bmap file. When it is calculated, the value of\n"
191        xml += "    the SHA1 checksum has to be zero (40 ASCII \"0\" symbols). -->\n"
192 xml += " <BmapFileSHA1> "
193
194 self._f_bmap.write(xml)
195 self._sha1_pos = self._f_bmap.tell()
196
197 xml = "0" * 40 + " </BmapFileSHA1>\n\n"
198 xml += " <!-- The block map which consists of elements which may either be a\n"
199 xml += " range of blocks or a single block. The 'sha1' attribute (if present)\n"
200 xml += " is the SHA1 checksum of this blocks range. -->\n"
201 xml += " <BlockMap>\n"
202 # pylint: enable=C0301
203
204 self._f_bmap.write(xml)
205
206 def _bmap_file_end(self):
207 """ A helper function which generates the final parts of the block map
208 file: the ending tags and the information about the amount of mapped
209 blocks. """
210
211 xml = " </BlockMap>\n"
212 xml += "</bmap>\n"
213
214 self._f_bmap.write(xml)
215
216 self._f_bmap.seek(self._mapped_count_pos1)
217 self._f_bmap.write("%s or %.1f%%" % \
218 (self.mapped_size_human, self.mapped_percent))
219
220 self._f_bmap.seek(self._mapped_count_pos2)
221 self._f_bmap.write("%u" % self.mapped_cnt)
222
223 self._f_bmap.seek(0)
224 sha1 = hashlib.sha1(self._f_bmap.read()).hexdigest()
225 self._f_bmap.seek(self._sha1_pos)
226 self._f_bmap.write("%s" % sha1)
227
228 def _calculate_sha1(self, first, last):
229 """ A helper function which calculates SHA1 checksum for the range of
230 blocks of the image file: from block 'first' to block 'last'. """
231
232 start = first * self.block_size
233 end = (last + 1) * self.block_size
234
235 self._f_image.seek(start)
236 hash_obj = hashlib.new("sha1")
237
238 chunk_size = 1024*1024
239 to_read = end - start
240 read = 0
241
242 while read < to_read:
243 if read + chunk_size > to_read:
244 chunk_size = to_read - read
245 chunk = self._f_image.read(chunk_size)
246 hash_obj.update(chunk)
247 read += chunk_size
248
249 return hash_obj.hexdigest()
250
251 def generate(self, include_checksums = True):
252 """ Generate bmap for the image file. If 'include_checksums' is 'True',
253 also generate SHA1 checksums for block ranges. """
254
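        # Note (illustrative, not in the original commit): computing the SHA1
        # checksums below reads every mapped block of the image, so callers
        # that only need the bare block map can pass
        # 'include_checksums = False' to trade integrity data for speed.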
255 # Save image file position in order to restore it at the end
256 image_pos = self._f_image.tell()
257
258 self._bmap_file_start()
259
260 # Generate the block map and write it to the XML block map
261 # file as we go.
262 self.mapped_cnt = 0
263 for first, last in self.fiemap.get_mapped_ranges(0, self.blocks_cnt):
264 self.mapped_cnt += last - first + 1
265 if include_checksums:
266 sha1 = self._calculate_sha1(first, last)
267 sha1 = " sha1=\"%s\"" % sha1
268 else:
269 sha1 = ""
270
271 if first != last:
272 self._f_bmap.write(" <Range%s> %s-%s </Range>\n" \
273 % (sha1, first, last))
274 else:
275 self._f_bmap.write(" <Range%s> %s </Range>\n" \
276 % (sha1, first))
277
278 self.mapped_size = self.mapped_cnt * self.block_size
279 self.mapped_size_human = human_size(self.mapped_size)
280 self.mapped_percent = (self.mapped_cnt * 100.0) / self.blocks_cnt
281
282 self._bmap_file_end()
283
284 try:
285 self._f_bmap.flush()
286 except IOError as err:
287 raise Error("cannot flush the bmap file '%s': %s" \
288 % (self._bmap_path, err))
289
290 self._f_image.seek(image_pos)
291
292 def __del__(self):
293 """ The class destructor which closes the opened files. """
294
295 if self._f_image_needs_close:
296 self._f_image.close()
297 if self._f_bmap_needs_close:
298 self._f_bmap.close()
diff --git a/scripts/lib/mic/utils/Fiemap.py b/scripts/lib/mic/utils/Fiemap.py
new file mode 100644
index 0000000000..f2db6ff0b8
--- /dev/null
+++ b/scripts/lib/mic/utils/Fiemap.py
@@ -0,0 +1,252 @@
1""" This module implements python API for the FIEMAP ioctl. The FIEMAP ioctl
2allows finding holes and mapped areas in a file. """
3
4# Note, a lot of code in this module is not very readable, because it deals
5# with the rather complex FIEMAP ioctl. To understand the code, you need to
6# know the FIEMAP interface, which is documented in the
7# Documentation/filesystems/fiemap.txt file in the Linux kernel sources.
8
9# Disable the following pylint recommendations:
10# * Too many instance attributes (R0902)
11# pylint: disable=R0902
12
13import os
14import struct
15import array
16import fcntl
17from mic.utils.misc import get_block_size
18
19# Format string for 'struct fiemap'
20_FIEMAP_FORMAT = "=QQLLLL"
21# sizeof(struct fiemap)
22_FIEMAP_SIZE = struct.calcsize(_FIEMAP_FORMAT)
23# Format string for 'struct fiemap_extent'
24_FIEMAP_EXTENT_FORMAT = "=QQQQQLLLL"
25# sizeof(struct fiemap_extent)
26_FIEMAP_EXTENT_SIZE = struct.calcsize(_FIEMAP_EXTENT_FORMAT)
27# The FIEMAP ioctl number
28_FIEMAP_IOCTL = 0xC020660B
29
30# Minimum buffer which is required for 'class Fiemap' to operate
31MIN_BUFFER_SIZE = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE
32# The default buffer size for 'class Fiemap'
33DEFAULT_BUFFER_SIZE = 256 * 1024
34
35class Error(Exception):
36 """ A class for exceptions generated by this module. We currently support
37    only one type of exception, and we basically throw a human-readable problem
38 description in case of errors. """
39 pass
40
41class Fiemap:
42    """ This class provides an API to the FIEMAP ioctl. Namely, it allows
43    iterating over all mapped blocks and over all holes. """
44
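    # A minimal usage sketch (hypothetical file name, not part of this
    # commit): print every mapped block range of a file.
    #
    #   from mic.utils.Fiemap import Fiemap
    #   fiemap = Fiemap("image.raw")
    #   for first, last in fiemap.get_mapped_ranges(0, fiemap.blocks_cnt):
    #       print "blocks %d-%d are mapped" % (first, last)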
45 def _open_image_file(self):
46 """ Open the image file. """
47
48 try:
49 self._f_image = open(self._image_path, 'rb')
50 except IOError as err:
51 raise Error("cannot open image file '%s': %s" \
52 % (self._image_path, err))
53
54 self._f_image_needs_close = True
55
56 def __init__(self, image, buf_size = DEFAULT_BUFFER_SIZE):
57 """ Initialize a class instance. The 'image' argument is full path to
58 the file to operate on, or a file object to operate on.
59
60 The 'buf_size' argument is the size of the buffer for 'struct
61 fiemap_extent' elements which will be used when invoking the FIEMAP
62        ioctl. The larger the buffer, the fewer times the FIEMAP ioctl will
63        be invoked. """
64
65 self._f_image_needs_close = False
66
67 if hasattr(image, "fileno"):
68 self._f_image = image
69 self._image_path = image.name
70 else:
71 self._image_path = image
72 self._open_image_file()
73
74 # Validate 'buf_size'
75 if buf_size < MIN_BUFFER_SIZE:
76 raise Error("too small buffer (%d bytes), minimum is %d bytes" \
77 % (buf_size, MIN_BUFFER_SIZE))
78
79 # How many 'struct fiemap_extent' elements fit the buffer
80 buf_size -= _FIEMAP_SIZE
81 self._fiemap_extent_cnt = buf_size / _FIEMAP_EXTENT_SIZE
82 self._buf_size = self._fiemap_extent_cnt * _FIEMAP_EXTENT_SIZE
83 self._buf_size += _FIEMAP_SIZE
84
85 # Allocate a mutable buffer for the FIEMAP ioctl
86 self._buf = array.array('B', [0] * self._buf_size)
87
88 self.image_size = os.fstat(self._f_image.fileno()).st_size
89
90 try:
91 self.block_size = get_block_size(self._f_image)
92 except IOError as err:
93 raise Error("cannot get block size for '%s': %s" \
94 % (self._image_path, err))
95
96 self.blocks_cnt = self.image_size + self.block_size - 1
97 self.blocks_cnt /= self.block_size
98
99 # Synchronize the image file to make sure FIEMAP returns correct values
100 try:
101 self._f_image.flush()
102 except IOError as err:
103 raise Error("cannot flush image file '%s': %s" \
104 % (self._image_path, err))
105 try:
106            os.fsync(self._f_image.fileno())
107 except OSError as err:
108 raise Error("cannot synchronize image file '%s': %s " \
109 % (self._image_path, err.strerror))
110
111 # Check if the FIEMAP ioctl is supported
112 self.block_is_mapped(0)
113
114 def __del__(self):
115 """ The class destructor which closes the opened files. """
116
117 if self._f_image_needs_close:
118 self._f_image.close()
119
120 def _invoke_fiemap(self, block, count):
121 """ Invoke the FIEMAP ioctl for 'count' blocks of the file starting from
122 block number 'block'.
123
124 The full result of the operation is stored in 'self._buf' on exit.
125 Returns the unpacked 'struct fiemap' data structure in form of a python
126        list (just like 'struct.unpack()'). """
127
128 if block < 0 or block >= self.blocks_cnt:
129            raise Error("bad block number %d, should be within [0, %d]" \
130                        % (block, self.blocks_cnt - 1))
131
132 # Initialize the 'struct fiemap' part of the buffer
133 struct.pack_into(_FIEMAP_FORMAT, self._buf, 0, block * self.block_size,
134 count * self.block_size, 0, 0,
135 self._fiemap_extent_cnt, 0)
136
137 try:
138 fcntl.ioctl(self._f_image, _FIEMAP_IOCTL, self._buf, 1)
139 except IOError as err:
140 error_msg = "the FIEMAP ioctl failed for '%s': %s" \
141 % (self._image_path, err)
142 if err.errno == os.errno.EPERM or err.errno == os.errno.EACCES:
143 # The FIEMAP ioctl was added in kernel version 2.6.28 in 2008
144 error_msg += " (looks like your kernel does not support FIEMAP)"
145
146 raise Error(error_msg)
147
148 return struct.unpack(_FIEMAP_FORMAT, self._buf[:_FIEMAP_SIZE])
149
150 def block_is_mapped(self, block):
151 """ This function returns 'True' if block number 'block' of the image
152 file is mapped and 'False' otherwise. """
153
154 struct_fiemap = self._invoke_fiemap(block, 1)
155
156 # The 3rd element of 'struct_fiemap' is the 'fm_mapped_extents' field.
157 # If it contains zero, the block is not mapped, otherwise it is
158 # mapped.
159 return bool(struct_fiemap[3])
160
161 def block_is_unmapped(self, block):
162 """ This function returns 'True' if block number 'block' of the image
163 file is not mapped (hole) and 'False' otherwise. """
164
165 return not self.block_is_mapped(block)
166
167 def _unpack_fiemap_extent(self, index):
168 """ Unpack a 'struct fiemap_extent' structure object number 'index'
169 from the internal 'self._buf' buffer. """
170
171 offset = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE * index
172 return struct.unpack(_FIEMAP_EXTENT_FORMAT,
173 self._buf[offset : offset + _FIEMAP_EXTENT_SIZE])
174
175 def _do_get_mapped_ranges(self, start, count):
176        """ Implements most of the functionality for the 'get_mapped_ranges()'
177 generator: invokes the FIEMAP ioctl, walks through the mapped
178 extents and yields mapped block ranges. However, the ranges may be
179            consecutive (e.g., (1, 100), (101, 200)) and 'get_mapped_ranges()'
180 simply merges them. """
181
182 block = start
183 while block < start + count:
184 struct_fiemap = self._invoke_fiemap(block, count)
185
186 mapped_extents = struct_fiemap[3]
187 if mapped_extents == 0:
188 # No more mapped blocks
189 return
190
191 extent = 0
192 while extent < mapped_extents:
193 fiemap_extent = self._unpack_fiemap_extent(extent)
194
195 # Start of the extent
196 extent_start = fiemap_extent[0]
197 # Starting block number of the extent
198 extent_block = extent_start / self.block_size
199 # Length of the extent
200 extent_len = fiemap_extent[2]
201 # Count of blocks in the extent
202 extent_count = extent_len / self.block_size
203
204 # Extent length and offset have to be block-aligned
205 assert extent_start % self.block_size == 0
206 assert extent_len % self.block_size == 0
207
208 if extent_block > start + count - 1:
209 return
210
211 first = max(extent_block, block)
212 last = min(extent_block + extent_count, start + count) - 1
213 yield (first, last)
214
215 extent += 1
216
217 block = extent_block + extent_count
218
219 def get_mapped_ranges(self, start, count):
220 """ A generator which yields ranges of mapped blocks in the file. The
221 ranges are tuples of 2 elements: [first, last], where 'first' is the
222 first mapped block and 'last' is the last mapped block.
223
224 The ranges are yielded for the area of the file of size 'count' blocks,
225 starting from block 'start'. """
226
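        # For example (illustrative, not in the original commit), if
        # '_do_get_mapped_ranges()' yields (0, 9) and (10, 19), this
        # generator merges them and yields the single range (0, 19).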
227 iterator = self._do_get_mapped_ranges(start, count)
228
229 first_prev, last_prev = iterator.next()
230
231 for first, last in iterator:
232 if last_prev == first - 1:
233 last_prev = last
234 else:
235 yield (first_prev, last_prev)
236 first_prev, last_prev = first, last
237
238 yield (first_prev, last_prev)
239
240 def get_unmapped_ranges(self, start, count):
241 """ Just like 'get_mapped_ranges()', but yields unmapped block ranges
242 instead (holes). """
243
244 hole_first = start
245 for first, last in self._do_get_mapped_ranges(start, count):
246 if first > hole_first:
247 yield (hole_first, first - 1)
248
249 hole_first = last + 1
250
251 if hole_first < start + count:
252 yield (hole_first, start + count - 1)
diff --git a/scripts/lib/mic/utils/__init__.py b/scripts/lib/mic/utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/scripts/lib/mic/utils/__init__.py
diff --git a/scripts/lib/mic/utils/cmdln.py b/scripts/lib/mic/utils/cmdln.py
new file mode 100644
index 0000000000..b099473ee4
--- /dev/null
+++ b/scripts/lib/mic/utils/cmdln.py
@@ -0,0 +1,1586 @@
1#!/usr/bin/env python
2# Copyright (c) 2002-2007 ActiveState Software Inc.
3# License: MIT (see LICENSE.txt for license details)
4# Author: Trent Mick
5# Home: http://trentm.com/projects/cmdln/
6
7"""An improvement on Python's standard cmd.py module.
8
9As with cmd.py, this module provides "a simple framework for writing
10line-oriented command interpreters." This module provides a 'RawCmdln'
11class that fixes some design flaws in cmd.Cmd, making it more scalable
12and nicer to use for good 'cvs'- or 'svn'-style command line interfaces
13or simple shells. And it provides a 'Cmdln' class that adds
14optparse-based option processing. Basically you use it like this:
15
16 import cmdln
17
18 class MySVN(cmdln.Cmdln):
19 name = "svn"
20
21 @cmdln.alias('stat', 'st')
22        @cmdln.option('-v', '--verbose', action='store_true',
23 help='print verbose information')
24 def do_status(self, subcmd, opts, *paths):
25 print "handle 'svn status' command"
26
27 #...
28
29 if __name__ == "__main__":
30 shell = MySVN()
31 retval = shell.main()
32 sys.exit(retval)
33
34See the README.txt or <http://trentm.com/projects/cmdln/> for more
35details.
36"""
37
38__version_info__ = (1, 1, 2)
39__version__ = '.'.join(map(str, __version_info__))
40
41import os
42import sys
43import re
44import cmd
45import optparse
46from pprint import pprint
47import sys
48
49
50
51
52#---- globals
53
54LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)
55
56# An unspecified optional argument when None is a meaningful value.
57_NOT_SPECIFIED = ("Not", "Specified")
58
59# Pattern to match a TypeError message from a call that
60# failed because of incorrect number of arguments (see
61# Python/getargs.c).
62_INCORRECT_NUM_ARGS_RE = re.compile(
63 r"(takes [\w ]+ )(\d+)( arguments? \()(\d+)( given\))")
64
65
66
67#---- exceptions
68
69class CmdlnError(Exception):
70 """A cmdln.py usage error."""
71 def __init__(self, msg):
72 self.msg = msg
73 def __str__(self):
74 return self.msg
75
76class CmdlnUserError(Exception):
77 """An error by a user of a cmdln-based tool/shell."""
78 pass
79
80
81
82#---- public methods and classes
83
84def alias(*aliases):
85 """Decorator to add aliases for Cmdln.do_* command handlers.
86
87 Example:
88 class MyShell(cmdln.Cmdln):
89 @cmdln.alias("!", "sh")
90 def do_shell(self, argv):
91 #...implement 'shell' command
92 """
93 def decorate(f):
94 if not hasattr(f, "aliases"):
95 f.aliases = []
96 f.aliases += aliases
97 return f
98 return decorate
99
100
101class RawCmdln(cmd.Cmd):
102 """An improved (on cmd.Cmd) framework for building multi-subcommand
103 scripts (think "svn" & "cvs") and simple shells (think "pdb" and
104 "gdb").
105
106 A simple example:
107
108 import cmdln
109
110 class MySVN(cmdln.RawCmdln):
111 name = "svn"
112
113        @cmdln.alias('stat', 'st')
114 def do_status(self, argv):
115 print "handle 'svn status' command"
116
117 if __name__ == "__main__":
118 shell = MySVN()
119 retval = shell.main()
120 sys.exit(retval)
121
122 See <http://trentm.com/projects/cmdln> for more information.
123 """
124    name = None # if unset, defaults to basename(sys.argv[0])
125 prompt = None # if unset, defaults to self.name+"> "
126 version = None # if set, default top-level options include --version
127
128 # Default messages for some 'help' command error cases.
129 # They are interpolated with one arg: the command.
130 nohelp = "no help on '%s'"
131 unknowncmd = "unknown command: '%s'"
132
133 helpindent = '' # string with which to indent help output
134
135 def __init__(self, completekey='tab',
136 stdin=None, stdout=None, stderr=None):
137 """Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None)
138
139 The optional argument 'completekey' is the readline name of a
140 completion key; it defaults to the Tab key. If completekey is
141 not None and the readline module is available, command completion
142 is done automatically.
143
144 The optional arguments 'stdin', 'stdout' and 'stderr' specify
145 alternate input, output and error output file objects; if not
146 specified, sys.* are used.
147
148 If 'stdout' but not 'stderr' is specified, stdout is used for
149 error output. This is to provide least surprise for users used
150 to only the 'stdin' and 'stdout' options with cmd.Cmd.
151 """
152 import sys
153 if self.name is None:
154 self.name = os.path.basename(sys.argv[0])
155 if self.prompt is None:
156 self.prompt = self.name+"> "
157 self._name_str = self._str(self.name)
158 self._prompt_str = self._str(self.prompt)
159 if stdin is not None:
160 self.stdin = stdin
161 else:
162 self.stdin = sys.stdin
163 if stdout is not None:
164 self.stdout = stdout
165 else:
166 self.stdout = sys.stdout
167 if stderr is not None:
168 self.stderr = stderr
169 elif stdout is not None:
170 self.stderr = stdout
171 else:
172 self.stderr = sys.stderr
173 self.cmdqueue = []
174 self.completekey = completekey
175 self.cmdlooping = False
176
177 def get_optparser(self):
178 """Hook for subclasses to set the option parser for the
179 top-level command/shell.
180
181        This option parser is retrieved and used by `.main()' to
182 handle top-level options.
183
184 The default implements a single '-h|--help' option. Sub-classes
185 can return None to have no options at the top-level. Typically
186 an instance of CmdlnOptionParser should be returned.
187 """
188 version = (self.version is not None
189 and "%s %s" % (self._name_str, self.version)
190 or None)
191 return CmdlnOptionParser(self, version=version)
192
193 def postoptparse(self):
194 """Hook method executed just after `.main()' parses top-level
195 options.
196
197 When called `self.options' holds the results of the option parse.
198 """
199 pass
200
201 def main(self, argv=None, loop=LOOP_NEVER):
202 """A possible mainline handler for a script, like so:
203
204 import cmdln
205 class MyCmd(cmdln.Cmdln):
206 name = "mycmd"
207 ...
208
209 if __name__ == "__main__":
210 MyCmd().main()
211
212 By default this will use sys.argv to issue a single command to
213        'MyCmd', then exit. The 'loop' argument can be used to control
214 interactive shell behaviour.
215
216 Arguments:
217 "argv" (optional, default sys.argv) is the command to run.
218 It must be a sequence, where the first element is the
219 command name and subsequent elements the args for that
220 command.
221 "loop" (optional, default LOOP_NEVER) is a constant
222 indicating if a command loop should be started (i.e. an
223 interactive shell). Valid values (constants on this module):
224 LOOP_ALWAYS start loop and run "argv", if any
225 LOOP_NEVER run "argv" (or .emptyline()) and exit
226 LOOP_IF_EMPTY run "argv", if given, and exit;
227 otherwise, start loop
228 """
229 if argv is None:
230 import sys
231 argv = sys.argv
232 else:
233 argv = argv[:] # don't modify caller's list
234
235 self.optparser = self.get_optparser()
236 if self.optparser: # i.e. optparser=None means don't process for opts
237 try:
238 self.options, args = self.optparser.parse_args(argv[1:])
239 except CmdlnUserError, ex:
240 msg = "%s: %s\nTry '%s help' for info.\n"\
241 % (self.name, ex, self.name)
242 self.stderr.write(self._str(msg))
243 self.stderr.flush()
244 return 1
245 except StopOptionProcessing, ex:
246 return 0
247 else:
248 self.options, args = None, argv[1:]
249 self.postoptparse()
250
251 if loop == LOOP_ALWAYS:
252 if args:
253 self.cmdqueue.append(args)
254 return self.cmdloop()
255 elif loop == LOOP_NEVER:
256 if args:
257 return self.cmd(args)
258 else:
259 return self.emptyline()
260 elif loop == LOOP_IF_EMPTY:
261 if args:
262 return self.cmd(args)
263 else:
264 return self.cmdloop()
265
266 def cmd(self, argv):
267 """Run one command and exit.
268
269 "argv" is the arglist for the command to run. argv[0] is the
270 command to run. If argv is an empty list then the
271 'emptyline' handler is run.
272
273 Returns the return value from the command handler.
274 """
275 assert isinstance(argv, (list, tuple)), \
276 "'argv' is not a sequence: %r" % argv
277 retval = None
278 try:
279 argv = self.precmd(argv)
280 retval = self.onecmd(argv)
281 self.postcmd(argv)
282 except:
283 if not self.cmdexc(argv):
284 raise
285 retval = 1
286 return retval
287
288 def _str(self, s):
289 """Safely convert the given str/unicode to a string for printing."""
290 try:
291 return str(s)
292 except UnicodeError:
293 #XXX What is the proper encoding to use here? 'utf-8' seems
294 # to work better than "getdefaultencoding" (usually
295 # 'ascii'), on OS X at least.
296 #import sys
297 #return s.encode(sys.getdefaultencoding(), "replace")
298 return s.encode("utf-8", "replace")
299
300 def cmdloop(self, intro=None):
301 """Repeatedly issue a prompt, accept input, parse into an argv, and
302 dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
303 the argv. In other words, start a shell.
304
305        "intro" (optional) is an introductory message to print when
306 starting the command loop. This overrides the class
307 "intro" attribute, if any.
308 """
309 self.cmdlooping = True
310 self.preloop()
311 if self.use_rawinput and self.completekey:
312 try:
313 import readline
314 self.old_completer = readline.get_completer()
315 readline.set_completer(self.complete)
316 readline.parse_and_bind(self.completekey+": complete")
317 except ImportError:
318 pass
319 try:
320 if intro is None:
321 intro = self.intro
322 if intro:
323 intro_str = self._str(intro)
324 self.stdout.write(intro_str+'\n')
325 self.stop = False
326 retval = None
327 while not self.stop:
328 if self.cmdqueue:
329 argv = self.cmdqueue.pop(0)
330 assert isinstance(argv, (list, tuple)), \
331 "item on 'cmdqueue' is not a sequence: %r" % argv
332 else:
333 if self.use_rawinput:
334 try:
335 line = raw_input(self._prompt_str)
336 except EOFError:
337 line = 'EOF'
338 else:
339 self.stdout.write(self._prompt_str)
340 self.stdout.flush()
341 line = self.stdin.readline()
342 if not len(line):
343 line = 'EOF'
344 else:
345 line = line[:-1] # chop '\n'
346 argv = line2argv(line)
347 try:
348 argv = self.precmd(argv)
349 retval = self.onecmd(argv)
350 self.postcmd(argv)
351 except:
352 if not self.cmdexc(argv):
353 raise
354 retval = 1
355 self.lastretval = retval
356 self.postloop()
357 finally:
358 if self.use_rawinput and self.completekey:
359 try:
360 import readline
361 readline.set_completer(self.old_completer)
362 except ImportError:
363 pass
364 self.cmdlooping = False
365 return retval
366
367 def precmd(self, argv):
368 """Hook method executed just before the command argv is
369 interpreted, but after the input prompt is generated and issued.
370
371 "argv" is the cmd to run.
372
373 Returns an argv to run (i.e. this method can modify the command
374 to run).
375 """
376 return argv
377
378 def postcmd(self, argv):
379 """Hook method executed just after a command dispatch is finished.
380
381 "argv" is the command that was run.
382 """
383 pass
384
385 def cmdexc(self, argv):
386 """Called if an exception is raised in any of precmd(), onecmd(),
387 or postcmd(). If True is returned, the exception is deemed to have
388 been dealt with. Otherwise, the exception is re-raised.
389
390 The default implementation handles CmdlnUserError's, which
391 typically correspond to user error in calling commands (as
392 opposed to programmer error in the design of the script using
393 cmdln.py).
394 """
395 import sys
396 type, exc, traceback = sys.exc_info()
397 if isinstance(exc, CmdlnUserError):
398 msg = "%s %s: %s\nTry '%s help %s' for info.\n"\
399 % (self.name, argv[0], exc, self.name, argv[0])
400 self.stderr.write(self._str(msg))
401 self.stderr.flush()
402 return True
403
404 def onecmd(self, argv):
405 if not argv:
406 return self.emptyline()
407 self.lastcmd = argv
408 cmdname = self._get_canonical_cmd_name(argv[0])
409 if cmdname:
410 handler = self._get_cmd_handler(cmdname)
411 if handler:
412 return self._dispatch_cmd(handler, argv)
413 return self.default(argv)
414
415 def _dispatch_cmd(self, handler, argv):
416 return handler(argv)
417
418 def default(self, argv):
419 """Hook called to handle a command for which there is no handler.
420
421 "argv" is the command and arguments to run.
422
423        The default implementation writes an error message to stderr
424 and returns an error exit status.
425
426 Returns a numeric command exit status.
427 """
428 errmsg = self._str(self.unknowncmd % (argv[0],))
429 if self.cmdlooping:
430 self.stderr.write(errmsg+"\n")
431 else:
432 self.stderr.write("%s: %s\nTry '%s help' for info.\n"
433 % (self._name_str, errmsg, self._name_str))
434 self.stderr.flush()
435 return 1
436
437 def parseline(self, line):
438 # This is used by Cmd.complete (readline completer function) to
439 # massage the current line buffer before completion processing.
440 # We override to drop special '!' handling.
441 line = line.strip()
442 if not line:
443 return None, None, line
444 elif line[0] == '?':
445 line = 'help ' + line[1:]
446 i, n = 0, len(line)
447 while i < n and line[i] in self.identchars: i = i+1
448 cmd, arg = line[:i], line[i:].strip()
449 return cmd, arg, line
450
451 def helpdefault(self, cmd, known):
452 """Hook called to handle help on a command for which there is no
453 help handler.
454
455 "cmd" is the command name on which help was requested.
456 "known" is a boolean indicating if this command is known
457 (i.e. if there is a handler for it).
458
459 Returns a return code.
460 """
461 if known:
462 msg = self._str(self.nohelp % (cmd,))
463 if self.cmdlooping:
464 self.stderr.write(msg + '\n')
465 else:
466 self.stderr.write("%s: %s\n" % (self.name, msg))
467 else:
468 msg = self.unknowncmd % (cmd,)
469 if self.cmdlooping:
470 self.stderr.write(msg + '\n')
471 else:
472 self.stderr.write("%s: %s\n"
473 "Try '%s help' for info.\n"
474 % (self.name, msg, self.name))
475 self.stderr.flush()
476 return 1
477
478 def do_help(self, argv):
479 """${cmd_name}: give detailed help on a specific sub-command
480
481 Usage:
482 ${name} help [COMMAND]
483 """
484 if len(argv) > 1: # asking for help on a particular command
485 doc = None
486 cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
487 if not cmdname:
488 return self.helpdefault(argv[1], False)
489 else:
490 helpfunc = getattr(self, "help_"+cmdname, None)
491 if helpfunc:
492 doc = helpfunc()
493 else:
494 handler = self._get_cmd_handler(cmdname)
495 if handler:
496 doc = handler.__doc__
497 if doc is None:
498 return self.helpdefault(argv[1], handler != None)
499 else: # bare "help" command
500 doc = self.__class__.__doc__ # try class docstring
501 if doc is None:
502 # Try to provide some reasonable useful default help.
503 if self.cmdlooping: prefix = ""
504 else: prefix = self.name+' '
505 doc = """Usage:
506 %sCOMMAND [ARGS...]
507 %shelp [COMMAND]
508
509 ${option_list}
510 ${command_list}
511 ${help_list}
512 """ % (prefix, prefix)
513 cmdname = None
514
515 if doc: # *do* have help content, massage and print that
516 doc = self._help_reindent(doc)
517 doc = self._help_preprocess(doc, cmdname)
518 doc = doc.rstrip() + '\n' # trim down trailing space
519 self.stdout.write(self._str(doc))
520 self.stdout.flush()
521 do_help.aliases = ["?"]
522
523 def _help_reindent(self, help, indent=None):
524 """Hook to re-indent help strings before writing to stdout.
525
526 "help" is the help content to re-indent
527 "indent" is a string with which to indent each line of the
528 help content after normalizing. If unspecified or None
529            then the default is used: the 'self.helpindent' class
530 attribute. By default this is the empty string, i.e.
531 no indentation.
532
533 By default, all common leading whitespace is removed and then
534 the lot is indented by 'self.helpindent'. When calculating the
535 common leading whitespace the first line is ignored -- hence
536 help content for Conan can be written as follows and have the
537 expected indentation:
538
539 def do_crush(self, ...):
540 '''${cmd_name}: crush your enemies, see them driven before you...
541
542 c.f. Conan the Barbarian'''
543 """
544 if indent is None:
545 indent = self.helpindent
546 lines = help.splitlines(0)
547 _dedentlines(lines, skip_first_line=True)
548 lines = [(indent+line).rstrip() for line in lines]
549 return '\n'.join(lines)
550
551 def _help_preprocess(self, help, cmdname):
552 """Hook to preprocess a help string before writing to stdout.
553
554 "help" is the help string to process.
555 "cmdname" is the canonical sub-command name for which help
556 is being given, or None if the help is not specific to a
557 command.
558
559 By default the following template variables are interpolated in
560 help content. (Note: these are similar to Python 2.4's
561 string.Template interpolation but not quite.)
562
563 ${name}
564 The tool's/shell's name, i.e. 'self.name'.
565 ${option_list}
566 A formatted table of options for this shell/tool.
567 ${command_list}
568 A formatted table of available sub-commands.
569 ${help_list}
570 A formatted table of additional help topics (i.e. 'help_*'
571 methods with no matching 'do_*' method).
572 ${cmd_name}
573 The name (and aliases) for this sub-command formatted as:
574 "NAME (ALIAS1, ALIAS2, ...)".
575 ${cmd_usage}
576 A formatted usage block inferred from the command function
577 signature.
578 ${cmd_option_list}
579 A formatted table of options for this sub-command. (This is
580 only available for commands using the optparse integration,
581 i.e. using @cmdln.option decorators or manually setting the
582 'optparser' attribute on the 'do_*' method.)
583
584 Returns the processed help.
585 """
586 preprocessors = {
587 "${name}": self._help_preprocess_name,
588 "${option_list}": self._help_preprocess_option_list,
589 "${command_list}": self._help_preprocess_command_list,
590 "${help_list}": self._help_preprocess_help_list,
591 "${cmd_name}": self._help_preprocess_cmd_name,
592 "${cmd_usage}": self._help_preprocess_cmd_usage,
593 "${cmd_option_list}": self._help_preprocess_cmd_option_list,
594 }
595
596 for marker, preprocessor in preprocessors.items():
597 if marker in help:
598 help = preprocessor(help, cmdname)
599 return help
600
601 def _help_preprocess_name(self, help, cmdname=None):
602 return help.replace("${name}", self.name)
603
604 def _help_preprocess_option_list(self, help, cmdname=None):
605 marker = "${option_list}"
606 indent, indent_width = _get_indent(marker, help)
607 suffix = _get_trailing_whitespace(marker, help)
608
609 if self.optparser:
610 # Setup formatting options and format.
611 # - Indentation of 4 is better than optparse default of 2.
612 # C.f. Damian Conway's discussion of this in Perl Best
613 # Practices.
614 self.optparser.formatter.indent_increment = 4
615 self.optparser.formatter.current_indent = indent_width
616 block = self.optparser.format_option_help() + '\n'
617 else:
618 block = ""
619
620 help = help.replace(indent+marker+suffix, block, 1)
621 return help
622
623
624 def _help_preprocess_command_list(self, help, cmdname=None):
625 marker = "${command_list}"
626 indent, indent_width = _get_indent(marker, help)
627 suffix = _get_trailing_whitespace(marker, help)
628
629 # Find any aliases for commands.
630 token2canonical = self._get_canonical_map()
631 aliases = {}
632 for token, cmdname in token2canonical.items():
633 if token == cmdname: continue
634 aliases.setdefault(cmdname, []).append(token)
635
636 # Get the list of (non-hidden) commands and their
637 # documentation, if any.
638 cmdnames = {} # use a dict to strip duplicates
639 for attr in self.get_names():
640 if attr.startswith("do_"):
641 cmdnames[attr[3:]] = True
642 cmdnames = cmdnames.keys()
643 cmdnames.sort()
644 linedata = []
645 for cmdname in cmdnames:
646 if aliases.get(cmdname):
647 a = aliases[cmdname]
648 a.sort()
649 cmdstr = "%s (%s)" % (cmdname, ", ".join(a))
650 else:
651 cmdstr = cmdname
652 doc = None
653 try:
654 helpfunc = getattr(self, 'help_'+cmdname)
655 except AttributeError:
656 handler = self._get_cmd_handler(cmdname)
657 if handler:
658 doc = handler.__doc__
659 else:
660 doc = helpfunc()
661
662 # Strip "${cmd_name}: " from the start of a command's doc. Best
663 # practice dictates that command help strings begin with this, but
664 # it isn't at all wanted for the command list.
665 to_strip = "${cmd_name}:"
666 if doc and doc.startswith(to_strip):
667 #log.debug("stripping %r from start of %s's help string",
668 # to_strip, cmdname)
669 doc = doc[len(to_strip):].lstrip()
670 linedata.append( (cmdstr, doc) )
671
672 if linedata:
673 subindent = indent + ' '*4
674 lines = _format_linedata(linedata, subindent, indent_width+4)
675 block = indent + "Commands:\n" \
676 + '\n'.join(lines) + "\n\n"
677 help = help.replace(indent+marker+suffix, block, 1)
678 return help
679
680 def _gen_names_and_attrs(self):
681 # Inheritance says we have to look in class and
682 # base classes; order is not important.
683 names = []
684 classes = [self.__class__]
685 while classes:
686 aclass = classes.pop(0)
687 if aclass.__bases__:
688 classes = classes + list(aclass.__bases__)
689 for name in dir(aclass):
690 yield (name, getattr(aclass, name))
691
692 def _help_preprocess_help_list(self, help, cmdname=None):
693 marker = "${help_list}"
694 indent, indent_width = _get_indent(marker, help)
695 suffix = _get_trailing_whitespace(marker, help)
696
697 # Determine the additional help topics, if any.
698 helpnames = {}
699 token2cmdname = self._get_canonical_map()
700 for attrname, attr in self._gen_names_and_attrs():
701 if not attrname.startswith("help_"): continue
702 helpname = attrname[5:]
703 if helpname not in token2cmdname:
704 helpnames[helpname] = attr
705
706 if helpnames:
707 linedata = [(n, a.__doc__ or "") for n, a in helpnames.items()]
708 linedata.sort()
709
710 subindent = indent + ' '*4
711 lines = _format_linedata(linedata, subindent, indent_width+4)
712 block = (indent
713 + "Additional help topics (run `%s help TOPIC'):\n" % self.name
714 + '\n'.join(lines)
715 + "\n\n")
716 else:
717 block = ''
718 help = help.replace(indent+marker+suffix, block, 1)
719 return help
720
721 def _help_preprocess_cmd_name(self, help, cmdname=None):
722 marker = "${cmd_name}"
723 handler = self._get_cmd_handler(cmdname)
724 if not handler:
725 raise CmdlnError("cannot preprocess '%s' into help string: "
726 "could not find command handler for %r"
727 % (marker, cmdname))
728 s = cmdname
729 if hasattr(handler, "aliases"):
730 s += " (%s)" % (", ".join(handler.aliases))
731 help = help.replace(marker, s)
732 return help
733
734 #TODO: this only makes sense as part of the Cmdln class.
735 # Add hooks to add help preprocessing template vars and put
736 # this one on that class.
737 def _help_preprocess_cmd_usage(self, help, cmdname=None):
738 marker = "${cmd_usage}"
739 handler = self._get_cmd_handler(cmdname)
740 if not handler:
741 raise CmdlnError("cannot preprocess '%s' into help string: "
742 "could not find command handler for %r"
743 % (marker, cmdname))
744 indent, indent_width = _get_indent(marker, help)
745 suffix = _get_trailing_whitespace(marker, help)
746
747 # Extract the introspection bits we need.
748 func = handler.im_func
749 if func.func_defaults:
750 func_defaults = list(func.func_defaults)
751 else:
752 func_defaults = []
753 co_argcount = func.func_code.co_argcount
754 co_varnames = func.func_code.co_varnames
755 co_flags = func.func_code.co_flags
756 CO_FLAGS_ARGS = 4
757 CO_FLAGS_KWARGS = 8
758
759 # Adjust argcount for possible *args and **kwargs arguments.
760 argcount = co_argcount
761 if co_flags & CO_FLAGS_ARGS: argcount += 1
762 if co_flags & CO_FLAGS_KWARGS: argcount += 1
763
764 # Determine the usage string.
765 usage = "%s %s" % (self.name, cmdname)
766 if argcount <= 2: # handler ::= do_FOO(self, argv)
767 usage += " [ARGS...]"
768 elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
769 argnames = list(co_varnames[3:argcount])
770 tail = ""
771 if co_flags & CO_FLAGS_KWARGS:
772 name = argnames.pop(-1)
773 import warnings
774 # There is no generally accepted mechanism for passing
775 # keyword arguments from the command line. Could
776 # *perhaps* consider: arg=value arg2=value2 ...
777 warnings.warn("argument '**%s' on '%s.%s' command "
778 "handler will never get values"
779 % (name, self.__class__.__name__,
780 func.func_name))
781 if co_flags & CO_FLAGS_ARGS:
782 name = argnames.pop(-1)
783 tail = "[%s...]" % name.upper()
784 while func_defaults:
785 func_defaults.pop(-1)
786 name = argnames.pop(-1)
787 tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
788 while argnames:
789 name = argnames.pop(-1)
790 tail = "%s %s" % (name.upper(), tail)
791 usage += ' ' + tail
792
793 block_lines = [
794 self.helpindent + "Usage:",
795 self.helpindent + ' '*4 + usage
796 ]
797 block = '\n'.join(block_lines) + '\n\n'
798
799 help = help.replace(indent+marker+suffix, block, 1)
800 return help
801
802 #TODO: this only makes sense as part of the Cmdln class.
803 # Add hooks to add help preprocessing template vars and put
804 # this one on that class.
805 def _help_preprocess_cmd_option_list(self, help, cmdname=None):
806 marker = "${cmd_option_list}"
807 handler = self._get_cmd_handler(cmdname)
808 if not handler:
809 raise CmdlnError("cannot preprocess '%s' into help string: "
810 "could not find command handler for %r"
811 % (marker, cmdname))
812 indent, indent_width = _get_indent(marker, help)
813 suffix = _get_trailing_whitespace(marker, help)
814 if hasattr(handler, "optparser"):
815 # Setup formatting options and format.
816 # - Indentation of 4 is better than optparse default of 2.
817 # C.f. Damian Conway's discussion of this in Perl Best
818 # Practices.
819 handler.optparser.formatter.indent_increment = 4
820 handler.optparser.formatter.current_indent = indent_width
821 block = handler.optparser.format_option_help() + '\n'
822 else:
823 block = ""
824
825 help = help.replace(indent+marker+suffix, block, 1)
826 return help
827
828 def _get_canonical_cmd_name(self, token):
829 map = self._get_canonical_map()
830 return map.get(token, None)
831
832 def _get_canonical_map(self):
833 """Return a mapping of available command names and aliases to
834 their canonical command name.
835 """
836 cacheattr = "_token2canonical"
837 if not hasattr(self, cacheattr):
838 # Get the list of commands and their aliases, if any.
839 token2canonical = {}
840 cmd2funcname = {} # use a dict to strip duplicates
841 for attr in self.get_names():
842 if attr.startswith("do_"): cmdname = attr[3:]
843 elif attr.startswith("_do_"): cmdname = attr[4:]
844 else:
845 continue
846 cmd2funcname[cmdname] = attr
847 token2canonical[cmdname] = cmdname
848 for cmdname, funcname in cmd2funcname.items(): # add aliases
849 func = getattr(self, funcname)
850 aliases = getattr(func, "aliases", [])
851 for alias in aliases:
852 if alias in cmd2funcname:
853 import warnings
854 warnings.warn("'%s' alias for '%s' command conflicts "
855 "with '%s' handler"
856 % (alias, cmdname, cmd2funcname[alias]))
857 continue
858 token2canonical[alias] = cmdname
859 setattr(self, cacheattr, token2canonical)
860 return getattr(self, cacheattr)
861
862 def _get_cmd_handler(self, cmdname):
863 handler = None
864 try:
865 handler = getattr(self, 'do_' + cmdname)
866 except AttributeError:
867 try:
868 # Private command handlers begin with "_do_".
869 handler = getattr(self, '_do_' + cmdname)
870 except AttributeError:
871 pass
872 return handler
873
874 def _do_EOF(self, argv):
875 # Default EOF handler
876 # Note: an actual EOF is redirected to this command.
877 #TODO: separate name for this. Currently it is available from
878 # command-line. Is that okay?
879 self.stdout.write('\n')
880 self.stdout.flush()
881 self.stop = True
882
883 def emptyline(self):
884 # Different from cmd.Cmd: don't repeat the last command for an
885 # emptyline.
886 if self.cmdlooping:
887 pass
888 else:
889 return self.do_help(["help"])
890
891
892#---- optparse.py extension to fix (IMO) some deficiencies
893#
894# See the class _OptionParserEx docstring for details.
895#
896
897class StopOptionProcessing(Exception):
898 """Indicate that option *and argument* processing should stop
899 cleanly. This is not an error condition. It is similar in spirit to
900 StopIteration. This is raised by _OptionParserEx's default "help"
901 and "version" option actions and can be raised by custom option
902 callbacks too.
903
904 Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
905 usage is:
906
907 parser = CmdlnOptionParser(mycmd)
908 parser.add_option("-f", "--force", dest="force")
909 ...
910 try:
911 opts, args = parser.parse_args()
912 except StopOptionProcessing:
913 # normal termination, "--help" was probably given
914 sys.exit(0)
915 """
916
917class _OptionParserEx(optparse.OptionParser):
918 """An optparse.OptionParser that uses exceptions instead of sys.exit.
919
920 This class is an extension of optparse.OptionParser that differs
921 as follows:
922 - Correct (IMO) the default OptionParser error handling to never
923 sys.exit(). Instead OptParseError exceptions are passed through.
924 - Add the StopOptionProcessing exception (a la StopIteration) to
925 indicate normal termination of option processing.
926 See StopOptionProcessing's docstring for details.
927
928 I'd also like to see the following in the core optparse.py, perhaps
929 as a RawOptionParser which would serve as a base class for the more
930 generally used OptionParser (that works as current):
931 - Remove the implicit addition of the -h|--help and --version
932        options. They can get in the way (e.g. if you want '-?' and '-V' for
933 these as well) and it is not hard to do:
934 optparser.add_option("-h", "--help", action="help")
935 optparser.add_option("--version", action="version")
936 These are good practices, just not valid defaults if they can
937 get in the way.
938 """
939 def error(self, msg):
940 raise optparse.OptParseError(msg)
941
942 def exit(self, status=0, msg=None):
943 if status == 0:
944 raise StopOptionProcessing(msg)
945 else:
946 #TODO: don't lose status info here
947 raise optparse.OptParseError(msg)
948
949
950
951#---- optparse.py-based option processing support
952
953class CmdlnOptionParser(_OptionParserEx):
954 """An optparse.OptionParser class more appropriate for top-level
955 Cmdln options. For parsing of sub-command options, see
956 SubCmdOptionParser.
957
958 Changes:
959 - disable_interspersed_args() by default, because a Cmdln instance
960 has sub-commands which may themselves have options.
961 - Redirect print_help() to the Cmdln.do_help() which is better
962          equipped to handle the "help" action.
963        - error() will raise a CmdlnUserError: OptionParser.error() is meant
964 to be called for user errors. Raising a well-known error here can
965 make error handling clearer.
966 - Also see the changes in _OptionParserEx.
967 """
968 def __init__(self, cmdln, **kwargs):
969 self.cmdln = cmdln
970 kwargs["prog"] = self.cmdln.name
971 _OptionParserEx.__init__(self, **kwargs)
972 self.disable_interspersed_args()
973
974 def print_help(self, file=None):
975 self.cmdln.onecmd(["help"])
976
977 def error(self, msg):
978 raise CmdlnUserError(msg)
979
980
981class SubCmdOptionParser(_OptionParserEx):
982 def set_cmdln_info(self, cmdln, subcmd):
983 """Called by Cmdln to pass relevant info about itself needed
984 for print_help().
985 """
986 self.cmdln = cmdln
987 self.subcmd = subcmd
988
989 def print_help(self, file=None):
990 self.cmdln.onecmd(["help", self.subcmd])
991
992 def error(self, msg):
993 raise CmdlnUserError(msg)
994
995
996def option(*args, **kwargs):
997 """Decorator to add an option to the optparser argument of a Cmdln
998 subcommand.
999
1000 Example:
1001 class MyShell(cmdln.Cmdln):
1002 @cmdln.option("-f", "--force", help="force removal")
1003 def do_remove(self, subcmd, opts, *args):
1004 #...
1005 """
1006 #XXX Is there a possible optimization for many options to not have a
1007 # large stack depth here?
1008 def decorate(f):
1009 if not hasattr(f, "optparser"):
1010 f.optparser = SubCmdOptionParser()
1011 f.optparser.add_option(*args, **kwargs)
1012 return f
1013 return decorate
1014
1015
1016class Cmdln(RawCmdln):
1017 """An improved (on cmd.Cmd) framework for building multi-subcommand
1018 scripts (think "svn" & "cvs") and simple shells (think "pdb" and
1019 "gdb").
1020
1021 A simple example:
1022
1023 import cmdln
1024
1025 class MySVN(cmdln.Cmdln):
1026 name = "svn"
1027
1028        @cmdln.alias('stat', 'st')
1029        @cmdln.option('-v', '--verbose', action='store_true',
1030 help='print verbose information')
1031 def do_status(self, subcmd, opts, *paths):
1032 print "handle 'svn status' command"
1033
1034 #...
1035
1036 if __name__ == "__main__":
1037 shell = MySVN()
1038 retval = shell.main()
1039 sys.exit(retval)
1040
1041 'Cmdln' extends 'RawCmdln' by providing optparse option processing
1042 integration. See this class' _dispatch_cmd() docstring and
1043 <http://trentm.com/projects/cmdln> for more information.
1044 """
1045 def _dispatch_cmd(self, handler, argv):
1046 """Introspect sub-command handler signature to determine how to
1047 dispatch the command. The raw handler provided by the base
1048 'RawCmdln' class is still supported:
1049
1050 def do_foo(self, argv):
1051 # 'argv' is the vector of command line args, argv[0] is
1052 # the command name itself (i.e. "foo" or an alias)
1053 pass
1054
1055        In addition, if the handler has more than 2 arguments, option
1056 processing is automatically done (using optparse):
1057
1058 @cmdln.option('-v', '--verbose', action='store_true')
1059 def do_bar(self, subcmd, opts, *args):
1060 # subcmd = <"bar" or an alias>
1061 # opts = <an optparse.Values instance>
1062 if opts.verbose:
1063 print "lots of debugging output..."
1064 # args = <tuple of arguments>
1065 for arg in args:
1066 bar(arg)
1067
1068 TODO: explain that "*args" can be other signatures as well.
1069
1070 The `cmdln.option` decorator corresponds to an `add_option()`
1071 method call on an `optparse.OptionParser` instance.
1072
1073 You can declare a specific number of arguments:
1074
1075 @cmdln.option('-v', '--verbose', action='store_true')
1076 def do_bar2(self, subcmd, opts, bar_one, bar_two):
1077 #...
1078
1079 and an appropriate error message will be raised/printed if the
1080 command is called with a different number of args.
1081 """
1082 co_argcount = handler.im_func.func_code.co_argcount
1083 if co_argcount == 2: # handler ::= do_foo(self, argv)
1084 return handler(argv)
1085 elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
1086 try:
1087 optparser = handler.optparser
1088 except AttributeError:
1089 optparser = handler.im_func.optparser = SubCmdOptionParser()
1090 assert isinstance(optparser, SubCmdOptionParser)
1091 optparser.set_cmdln_info(self, argv[0])
1092 try:
1093 opts, args = optparser.parse_args(argv[1:])
1094 except StopOptionProcessing:
1095 #TODO: this doesn't really fly for a replacement of
1096 # optparse.py behaviour, does it?
1097 return 0 # Normal command termination
1098
1099 try:
1100 return handler(argv[0], opts, *args)
1101 except TypeError, ex:
1102 # Some TypeError's are user errors:
1103 # do_foo() takes at least 4 arguments (3 given)
1104 # do_foo() takes at most 5 arguments (6 given)
1105 # do_foo() takes exactly 5 arguments (6 given)
1106 # Raise CmdlnUserError for these with a suitably
1107 # massaged error message.
1108 import sys
1109 tb = sys.exc_info()[2] # the traceback object
1110 if tb.tb_next is not None:
1111 # If the traceback is more than one level deep, then the
1112                # TypeError did *not* happen on the "handler(...)" call
1113                # above. In that case we don't want to handle it specially
1114 # here: it would falsely mask deeper code errors.
1115 raise
1116 msg = ex.args[0]
1117 match = _INCORRECT_NUM_ARGS_RE.search(msg)
1118 if match:
1119 msg = list(match.groups())
1120 msg[1] = int(msg[1]) - 3
1121 if msg[1] == 1:
1122 msg[2] = msg[2].replace("arguments", "argument")
1123 msg[3] = int(msg[3]) - 3
1124 msg = ''.join(map(str, msg))
1125 raise CmdlnUserError(msg)
1126 else:
1127 raise
1128 else:
1129 raise CmdlnError("incorrect argcount for %s(): takes %d, must "
1130 "take 2 for 'argv' signature or 3+ for 'opts' "
1131 "signature" % (handler.__name__, co_argcount))
1132
1133
1134
1135#---- internal support functions
1136
1137def _format_linedata(linedata, indent, indent_width):
1138 """Format specific linedata into a pleasant layout.
1139
1140 "linedata" is a list of 2-tuples of the form:
1141 (<item-display-string>, <item-docstring>)
1142 "indent" is a string to use for one level of indentation
1143 "indent_width" is a number of columns by which the
1144 formatted data will be indented when printed.
1145
1146    The <item-display-string> column is held to a bounded width (13 or 16 columns).
1147 """
1148 lines = []
1149 WIDTH = 78 - indent_width
1150 SPACING = 2
1151 NAME_WIDTH_LOWER_BOUND = 13
1152 NAME_WIDTH_UPPER_BOUND = 16
1153 NAME_WIDTH = max([len(s) for s,d in linedata])
1154 if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
1155 NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
1156 else:
1157 NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
1158
1159 DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
1160 for namestr, doc in linedata:
1161 line = indent + namestr
1162 if len(namestr) <= NAME_WIDTH:
1163 line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
1164 else:
1165 lines.append(line)
1166 line = indent + ' ' * (NAME_WIDTH + SPACING)
1167 line += _summarize_doc(doc, DOC_WIDTH)
1168 lines.append(line.rstrip())
1169 return lines
1170
1171def _summarize_doc(doc, length=60):
1172 r"""Parse out a short one line summary from the given doclines.
1173
1174 "doc" is the doc string to summarize.
1175 "length" is the max length for the summary
1176
1177 >>> _summarize_doc("this function does this")
1178 'this function does this'
1179 >>> _summarize_doc("this function does this", 10)
1180 'this fu...'
1181 >>> _summarize_doc("this function does this\nand that")
1182 'this function does this and that'
1183 >>> _summarize_doc("this function does this\n\nand that")
1184 'this function does this'
1185 """
1186 import re
1187 if doc is None:
1188 return ""
1189 assert length > 3, "length <= 3 is absurdly short for a doc summary"
1190 doclines = doc.strip().splitlines(0)
1191 if not doclines:
1192 return ""
1193
1194 summlines = []
1195 for i, line in enumerate(doclines):
1196 stripped = line.strip()
1197 if not stripped:
1198 break
1199 summlines.append(stripped)
1200 if len(''.join(summlines)) >= length:
1201 break
1202
1203 summary = ' '.join(summlines)
1204 if len(summary) > length:
1205 summary = summary[:length-3] + "..."
1206 return summary
1207
1208
1209def line2argv(line):
1210 r"""Parse the given line into an argument vector.
1211
1212 "line" is the line of input to parse.
1213
1214 This may get niggly when dealing with quoting and escaping. The
1215 current state of this parsing may not be completely thorough/correct
1216 in this respect.
1217
1218 >>> from cmdln import line2argv
1219 >>> line2argv("foo")
1220 ['foo']
1221 >>> line2argv("foo bar")
1222 ['foo', 'bar']
1223 >>> line2argv("foo bar ")
1224 ['foo', 'bar']
1225 >>> line2argv(" foo bar")
1226 ['foo', 'bar']
1227
1228 Quote handling:
1229
1230 >>> line2argv("'foo bar'")
1231 ['foo bar']
1232 >>> line2argv('"foo bar"')
1233 ['foo bar']
1234 >>> line2argv(r'"foo\"bar"')
1235 ['foo"bar']
1236 >>> line2argv("'foo bar' spam")
1237 ['foo bar', 'spam']
1238 >>> line2argv("'foo 'bar spam")
1239 ['foo bar', 'spam']
1240
1241 >>> line2argv('some\tsimple\ttests')
1242 ['some', 'simple', 'tests']
1243 >>> line2argv('a "more complex" test')
1244 ['a', 'more complex', 'test']
1245 >>> line2argv('a more="complex test of " quotes')
1246 ['a', 'more=complex test of ', 'quotes']
1247 >>> line2argv('a more" complex test of " quotes')
1248 ['a', 'more complex test of ', 'quotes']
1249 >>> line2argv('an "embedded \\"quote\\""')
1250 ['an', 'embedded "quote"']
1251
1252 # Komodo bug 48027
1253 >>> line2argv('foo bar C:\\')
1254 ['foo', 'bar', 'C:\\']
1255
1256 # Komodo change 127581
1257 >>> line2argv(r'"\test\slash" "foo bar" "foo\"bar"')
1258 ['\\test\\slash', 'foo bar', 'foo"bar']
1259
1260 # Komodo change 127629
1261 >>> if sys.platform == "win32":
1262 ... line2argv(r'\foo\bar') == ['\\foo\\bar']
1263 ... line2argv(r'\\foo\\bar') == ['\\\\foo\\\\bar']
1264 ... line2argv('"foo') == ['foo']
1265 ... else:
1266 ... line2argv(r'\foo\bar') == ['foobar']
1267 ... line2argv(r'\\foo\\bar') == ['\\foo\\bar']
1268 ... try:
1269 ... line2argv('"foo')
1270 ... except ValueError, ex:
1271 ... "not terminated" in str(ex)
1272 True
1273 True
1274 True
1275 """
1276 import string
1277 line = line.strip()
1278 argv = []
1279 state = "default"
1280 arg = None # the current argument being parsed
1281 i = -1
1282 while 1:
1283 i += 1
1284 if i >= len(line): break
1285 ch = line[i]
1286
1287 if ch == "\\" and i+1 < len(line):
1288 # escaped char always added to arg, regardless of state
1289 if arg is None: arg = ""
1290 if (sys.platform == "win32"
1291 or state in ("double-quoted", "single-quoted")
1292 ) and line[i+1] not in tuple('"\''):
1293 arg += ch
1294 i += 1
1295 arg += line[i]
1296 continue
1297
1298 if state == "single-quoted":
1299 if ch == "'":
1300 state = "default"
1301 else:
1302 arg += ch
1303 elif state == "double-quoted":
1304 if ch == '"':
1305 state = "default"
1306 else:
1307 arg += ch
1308 elif state == "default":
1309 if ch == '"':
1310 if arg is None: arg = ""
1311 state = "double-quoted"
1312 elif ch == "'":
1313 if arg is None: arg = ""
1314 state = "single-quoted"
1315 elif ch in string.whitespace:
1316 if arg is not None:
1317 argv.append(arg)
1318 arg = None
1319 else:
1320 if arg is None: arg = ""
1321 arg += ch
1322 if arg is not None:
1323 argv.append(arg)
1324    if sys.platform != "win32" and state != "default":
1325 raise ValueError("command line is not terminated: unfinished %s "
1326 "segment" % state)
1327 return argv
1328
1329
1330def argv2line(argv):
1331 r"""Put together the given argument vector into a command line.
1332
1333 "argv" is the argument vector to process.
1334
1335 >>> from cmdln import argv2line
1336 >>> argv2line(['foo'])
1337 'foo'
1338 >>> argv2line(['foo', 'bar'])
1339 'foo bar'
1340 >>> argv2line(['foo', 'bar baz'])
1341 'foo "bar baz"'
1342 >>> argv2line(['foo"bar'])
1343 'foo"bar'
1344 >>> print argv2line(['foo" bar'])
1345 'foo" bar'
1346 >>> print argv2line(["foo' bar"])
1347 "foo' bar"
1348 >>> argv2line(["foo'bar"])
1349 "foo'bar"
1350 """
1351 escapedArgs = []
1352 for arg in argv:
1353 if ' ' in arg and '"' not in arg:
1354 arg = '"'+arg+'"'
1355 elif ' ' in arg and "'" not in arg:
1356 arg = "'"+arg+"'"
1357 elif ' ' in arg:
1358 arg = arg.replace('"', r'\"')
1359 arg = '"'+arg+'"'
1360 escapedArgs.append(arg)
1361 return ' '.join(escapedArgs)
1362
1363
1364# Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook
1365def _dedentlines(lines, tabsize=8, skip_first_line=False):
1366 """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
1367
1368 "lines" is a list of lines to dedent.
1369 "tabsize" is the tab width to use for indent width calculations.
1370 "skip_first_line" is a boolean indicating if the first line should
1371 be skipped for calculating the indent width and for dedenting.
1372 This is sometimes useful for docstrings and similar.
1373
1374 Same as dedent() except operates on a sequence of lines. Note: the
1375 lines list is modified **in-place**.
1376 """
1377 DEBUG = False
1378 if DEBUG:
1379 print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
1380 % (tabsize, skip_first_line)
1381 indents = []
1382 margin = None
1383 for i, line in enumerate(lines):
1384 if i == 0 and skip_first_line: continue
1385 indent = 0
1386 for ch in line:
1387 if ch == ' ':
1388 indent += 1
1389 elif ch == '\t':
1390 indent += tabsize - (indent % tabsize)
1391 elif ch in '\r\n':
1392 continue # skip all-whitespace lines
1393 else:
1394 break
1395 else:
1396 continue # skip all-whitespace lines
1397 if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
1398 if margin is None:
1399 margin = indent
1400 else:
1401 margin = min(margin, indent)
1402 if DEBUG: print "dedent: margin=%r" % margin
1403
1404 if margin is not None and margin > 0:
1405 for i, line in enumerate(lines):
1406 if i == 0 and skip_first_line: continue
1407 removed = 0
1408 for j, ch in enumerate(line):
1409 if ch == ' ':
1410 removed += 1
1411 elif ch == '\t':
1412 removed += tabsize - (removed % tabsize)
1413 elif ch in '\r\n':
1414 if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
1415 lines[i] = lines[i][j:]
1416 break
1417 else:
1418 raise ValueError("unexpected non-whitespace char %r in "
1419 "line %r while removing %d-space margin"
1420 % (ch, line, margin))
1421 if DEBUG:
1422 print "dedent: %r: %r -> removed %d/%d"\
1423 % (line, ch, removed, margin)
1424 if removed == margin:
1425 lines[i] = lines[i][j+1:]
1426 break
1427 elif removed > margin:
1428 lines[i] = ' '*(removed-margin) + lines[i][j+1:]
1429 break
1430 return lines
1431
1432def _dedent(text, tabsize=8, skip_first_line=False):
1433 """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
1434
1435 "text" is the text to dedent.
1436 "tabsize" is the tab width to use for indent width calculations.
1437 "skip_first_line" is a boolean indicating if the first line should
1438 be skipped for calculating the indent width and for dedenting.
1439 This is sometimes useful for docstrings and similar.
1440
1441 textwrap.dedent(s), but don't expand tabs to spaces
1442 """
1443 lines = text.splitlines(1)
1444 _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
1445 return ''.join(lines)
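
# Example (a sketch of the behaviour above): tabs are measured at
# 'tabsize' columns when computing the margin, and a tab that spans past
# the margin is partially replaced with spaces:
#
#     >>> _dedent("    foo\n\tbar")
#     'foo\n    bar'
#
# The common margin is 4 columns; the 8-column tab loses 4 of them.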
1446
1447
1448def _get_indent(marker, s, tab_width=8):
1449 """_get_indent(marker, s, tab_width=8) ->
1450 (<indentation-of-'marker'>, <indentation-width>)"""
1451 # Figure out how much the marker is indented.
1452 INDENT_CHARS = tuple(' \t')
1453 start = s.index(marker)
1454 i = start
1455 while i > 0:
1456 if s[i-1] not in INDENT_CHARS:
1457 break
1458 i -= 1
1459 indent = s[i:start]
1460 indent_width = 0
1461 for ch in indent:
1462 if ch == ' ':
1463 indent_width += 1
1464 elif ch == '\t':
1465 indent_width += tab_width - (indent_width % tab_width)
1466 return indent, indent_width
1467
1468def _get_trailing_whitespace(marker, s):
1469 """Return the whitespace content trailing the given 'marker' in string 's',
1470 up to and including a newline.
1471 """
1472 suffix = ''
1473 start = s.index(marker) + len(marker)
1474 i = start
1475 while i < len(s):
1476 if s[i] in ' \t':
1477 suffix += s[i]
1478 elif s[i] in '\r\n':
1479 suffix += s[i]
1480 if s[i] == '\r' and i+1 < len(s) and s[i+1] == '\n':
1481 suffix += s[i+1]
1482 break
1483 else:
1484 break
1485 i += 1
1486 return suffix
1487
1488
1489
1490#---- bash completion support
1491# Note: This is still experimental. I expect to change this
1492# significantly.
1493#
1494# To get Bash completion for a cmdln.Cmdln class, run the following
1495# bash command:
1496# $ complete -C 'python -m cmdln /path/to/script.py CmdlnClass' cmdname
1497# For example:
1498# $ complete -C 'python -m cmdln ~/bin/svn.py SVN' svn
1499#
1500#TODO: Simplify the above so you don't have to give the path to the script (try to
1501# find it on PATH, if possible). Could also make class name
1502# optional if there is only one in the module (common case).
1503
1504if __name__ == "__main__" and len(sys.argv) == 6:
1505 def _log(s):
1506        return # no-op; comment this line out to enable the debug logging below
1507 from os.path import expanduser
1508 fout = open(expanduser("~/tmp/bashcpln.log"), 'a')
1509 fout.write(str(s) + '\n')
1510 fout.close()
1511
1512 # Recipe: module_from_path (1.0.1+)
1513 def _module_from_path(path):
1514 import imp, os, sys
1515 path = os.path.expanduser(path)
1516 dir = os.path.dirname(path) or os.curdir
1517 name = os.path.splitext(os.path.basename(path))[0]
1518 sys.path.insert(0, dir)
1519 try:
1520 iinfo = imp.find_module(name, [dir])
1521 return imp.load_module(name, *iinfo)
1522 finally:
1523 sys.path.remove(dir)
1524
1525 def _get_bash_cplns(script_path, class_name, cmd_name,
1526 token, preceding_token):
1527 _log('--')
1528 _log('get_cplns(%r, %r, %r, %r, %r)'
1529 % (script_path, class_name, cmd_name, token, preceding_token))
1530 comp_line = os.environ["COMP_LINE"]
1531 comp_point = int(os.environ["COMP_POINT"])
1532 _log("COMP_LINE: %r" % comp_line)
1533 _log("COMP_POINT: %r" % comp_point)
1534
1535 try:
1536 script = _module_from_path(script_path)
1537 except ImportError, ex:
1538 _log("error importing `%s': %s" % (script_path, ex))
1539 return []
1540 shell = getattr(script, class_name)()
1541 cmd_map = shell._get_canonical_map()
1542 del cmd_map["EOF"]
1543
1544 # Determine if completing the sub-command name.
1545 parts = comp_line[:comp_point].split(None, 1)
1546 _log(parts)
1547 if len(parts) == 1 or not (' ' in parts[1] or '\t' in parts[1]):
1548 #TODO: if parts[1].startswith('-'): handle top-level opts
1549 _log("complete sub-command names")
1550 matches = {}
1551 for name, canon_name in cmd_map.items():
1552 if name.startswith(token):
1553 matches[name] = canon_name
1554 if not matches:
1555 return []
1556 elif len(matches) == 1:
1557 return matches.keys()
1558 elif len(set(matches.values())) == 1:
1559 return [matches.values()[0]]
1560 else:
1561 return matches.keys()
1562
1563 # Otherwise, complete options for the given sub-command.
1564 #TODO: refine this so it does the right thing with option args
1565 if token.startswith('-'):
1566 cmd_name = comp_line.split(None, 2)[1]
1567 try:
1568 cmd_canon_name = cmd_map[cmd_name]
1569 except KeyError:
1570 return []
1571 handler = shell._get_cmd_handler(cmd_canon_name)
1572 optparser = getattr(handler, "optparser", None)
1573 if optparser is None:
1574 optparser = SubCmdOptionParser()
1575 opt_strs = []
1576 for option in optparser.option_list:
1577 for opt_str in option._short_opts + option._long_opts:
1578 if opt_str.startswith(token):
1579 opt_strs.append(opt_str)
1580 return opt_strs
1581
1582 return []
1583
1584 for cpln in _get_bash_cplns(*sys.argv[1:]):
1585 print cpln
1586
diff --git a/scripts/lib/mic/utils/errors.py b/scripts/lib/mic/utils/errors.py
new file mode 100644
index 0000000000..8d720f9080
--- /dev/null
+++ b/scripts/lib/mic/utils/errors.py
@@ -0,0 +1,71 @@
1#!/usr/bin/python -tt
2#
3# Copyright (c) 2007 Red Hat, Inc.
4# Copyright (c) 2011 Intel, Inc.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms of the GNU General Public License as published by the Free
8# Software Foundation; version 2 of the License
9#
10# This program is distributed in the hope that it will be useful, but
11# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13# for more details.
14#
15# You should have received a copy of the GNU General Public License along
16# with this program; if not, write to the Free Software Foundation, Inc., 59
17# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18
19class CreatorError(Exception):
20 """An exception base class for all imgcreate errors."""
21 keyword = '<creator>'
22
23 def __init__(self, msg):
24 self.msg = msg
25
26 def __str__(self):
27 if isinstance(self.msg, unicode):
28 self.msg = self.msg.encode('utf-8', 'ignore')
29 else:
30 self.msg = str(self.msg)
31 return self.keyword + self.msg
32
33class Usage(CreatorError):
34 keyword = '<usage>'
35
36 def __str__(self):
37 if isinstance(self.msg, unicode):
38 self.msg = self.msg.encode('utf-8', 'ignore')
39 else:
40 self.msg = str(self.msg)
41 return self.keyword + self.msg + ', please use "--help" for more info'
42
43class Abort(CreatorError):
44 keyword = ''
45
46class ConfigError(CreatorError):
47 keyword = '<config>'
48
49class KsError(CreatorError):
50 keyword = '<kickstart>'
51
52class RepoError(CreatorError):
53 keyword = '<repo>'
54
55class RpmError(CreatorError):
56 keyword = '<rpm>'
57
58class MountError(CreatorError):
59 keyword = '<mount>'
60
61class SnapshotError(CreatorError):
62 keyword = '<snapshot>'
63
64class SquashfsError(CreatorError):
65 keyword = '<squashfs>'
66
67class BootstrapError(CreatorError):
68 keyword = '<bootstrap>'
69
70class RuntimeError(CreatorError):
71 keyword = '<runtime>'
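
# A minimal usage sketch (not part of the original module): each subclass
# contributes its 'keyword' prefix when the exception is stringified, e.g.
#
#     from mic.utils.errors import MountError
#     try:
#         raise MountError("failed to mount /dev/loop0")
#     except MountError, err:
#         print str(err)        # -> "<mount>failed to mount /dev/loop0"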
diff --git a/scripts/lib/mic/utils/fs_related.py b/scripts/lib/mic/utils/fs_related.py
new file mode 100644
index 0000000000..dd420e88dc
--- /dev/null
+++ b/scripts/lib/mic/utils/fs_related.py
@@ -0,0 +1,1060 @@
1#!/usr/bin/python -tt
2#
3# Copyright (c) 2007, Red Hat, Inc.
4# Copyright (c) 2009, 2010, 2011 Intel, Inc.
5#
6# This program is free software; you can redistribute it and/or modify it
7# under the terms of the GNU General Public License as published by the Free
8# Software Foundation; version 2 of the License
9#
10# This program is distributed in the hope that it will be useful, but
11# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13# for more details.
14#
15# You should have received a copy of the GNU General Public License along
16# with this program; if not, write to the Free Software Foundation, Inc., 59
17# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18
19from __future__ import with_statement
20import os
21import sys
22import errno
23import stat
24import random
25import string
26import time
27import uuid
28
29from mic import msger
30from mic.utils import runner
31from mic.utils.errors import *
32from mic.utils.oe.misc import *
33
34def find_binary_inchroot(binary, chroot):
35 paths = ["/usr/sbin",
36 "/usr/bin",
37 "/sbin",
38 "/bin"
39 ]
40
41 for path in paths:
42 bin_path = "%s/%s" % (path, binary)
43 if os.path.exists("%s/%s" % (chroot, bin_path)):
44 return bin_path
45 return None
46
47def find_binary_path(binary):
48 if os.environ.has_key("PATH"):
49 paths = os.environ["PATH"].split(":")
50 else:
51 paths = []
52 if os.environ.has_key("HOME"):
53 paths += [os.environ["HOME"] + "/bin"]
54 paths += ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"]
55
56 for path in paths:
57 bin_path = "%s/%s" % (path, binary)
58 if os.path.exists(bin_path):
59 return bin_path
60
61 print "External command '%s' not found, exiting." % binary
62 print " (Please install '%s' on your host system)" % binary
63 sys.exit(1)
64
65def makedirs(dirname):
66 """A version of os.makedirs() that doesn't throw an
67 exception if the leaf directory already exists.
68 """
69 try:
70 os.makedirs(dirname)
71 except OSError, err:
72 if err.errno != errno.EEXIST:
73 raise
74
75def mksquashfs(in_img, out_img):
76 fullpathmksquashfs = find_binary_path("mksquashfs")
77 args = [fullpathmksquashfs, in_img, out_img]
78
79 if not sys.stdout.isatty():
80 args.append("-no-progress")
81
82 ret = runner.show(args)
83 if ret != 0:
84 raise SquashfsError("'%s' exited with error (%d)" % (' '.join(args), ret))
85
86def resize2fs(fs, size):
87 resize2fs = find_binary_path("resize2fs")
88 if size == 0:
89        # a size of 0 means minimize the filesystem
90 return runner.show([resize2fs, '-M', fs])
91 else:
92 return runner.show([resize2fs, fs, "%sK" % (size / 1024,)])
93
94def my_fuser(fp):
95 fuser = find_binary_path("fuser")
96 if not os.path.exists(fp):
97 return False
98
99 rc = runner.quiet([fuser, "-s", fp])
100 if rc == 0:
101 for pid in runner.outs([fuser, fp]).split():
102 fd = open("/proc/%s/cmdline" % pid, "r")
103 cmdline = fd.read()
104 fd.close()
105 if cmdline[:-1] == "/bin/bash":
106 return True
107
108 # not found
109 return False
110
111class BindChrootMount:
112 """Represents a bind mount of a directory into a chroot."""
113 def __init__(self, src, chroot, dest = None, option = None):
114 self.root = os.path.abspath(os.path.expanduser(chroot))
115 self.option = option
116
117 self.orig_src = self.src = src
118 if os.path.islink(src):
119 self.src = os.readlink(src)
120 if not self.src.startswith('/'):
121 self.src = os.path.abspath(os.path.join(os.path.dirname(src),
122 self.src))
123
124 if not dest:
125 dest = self.src
126 self.dest = os.path.join(self.root, dest.lstrip('/'))
127
128 self.mounted = False
129 self.mountcmd = find_binary_path("mount")
130 self.umountcmd = find_binary_path("umount")
131
132 def ismounted(self):
133 with open('/proc/mounts') as f:
134 for line in f:
135 if line.split()[1] == os.path.abspath(self.dest):
136 return True
137
138 return False
139
140 def has_chroot_instance(self):
141 lock = os.path.join(self.root, ".chroot.lock")
142 return my_fuser(lock)
143
144 def mount(self):
145 if self.mounted or self.ismounted():
146 return
147
148 makedirs(self.dest)
149 rc = runner.show([self.mountcmd, "--bind", self.src, self.dest])
150 if rc != 0:
151 raise MountError("Bind-mounting '%s' to '%s' failed" %
152 (self.src, self.dest))
153 if self.option:
154 rc = runner.show([self.mountcmd, "--bind", "-o", "remount,%s" % self.option, self.dest])
155 if rc != 0:
156 raise MountError("Bind-remounting '%s' failed" % self.dest)
157
158 self.mounted = True
159 if os.path.islink(self.orig_src):
160 dest = os.path.join(self.root, self.orig_src.lstrip('/'))
161 if not os.path.exists(dest):
162 os.symlink(self.src, dest)
163
164 def unmount(self):
165 if self.has_chroot_instance():
166 return
167
168 if self.ismounted():
169 runner.show([self.umountcmd, "-l", self.dest])
170 self.mounted = False
171
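# A usage sketch for BindChrootMount (illustrative; the paths below are
# hypothetical): bind-mount a host directory into a chroot, remount it
# read-only, and lazily unmount it when done.
#
#     bind = BindChrootMount("/var/cache/repo", "/tmp/mychroot", option = "ro")
#     bind.mount()        # no-op if the target is already mounted
#     try:
#         pass            # ... work inside the chroot ...
#     finally:
#         bind.unmount()  # skipped while a live chroot holds .chroot.lock
#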
172class LoopbackMount:
173 """LoopbackMount compatibility layer for old API"""
174 def __init__(self, lofile, mountdir, fstype = None):
175 self.diskmount = DiskMount(LoopbackDisk(lofile,size = 0),mountdir,fstype,rmmountdir = True)
176 self.losetup = False
177 self.losetupcmd = find_binary_path("losetup")
178
179 def cleanup(self):
180 self.diskmount.cleanup()
181
182 def unmount(self):
183 self.diskmount.unmount()
184
185 def lounsetup(self):
186 if self.losetup:
187 runner.show([self.losetupcmd, "-d", self.loopdev])
188 self.losetup = False
189 self.loopdev = None
190
191 def loopsetup(self):
192 if self.losetup:
193 return
194
195 self.loopdev = get_loop_device(self.losetupcmd, self.lofile)
196 self.losetup = True
197
198 def mount(self):
199 self.diskmount.mount()
200
201class SparseLoopbackMount(LoopbackMount):
202 """SparseLoopbackMount compatibility layer for old API"""
203 def __init__(self, lofile, mountdir, size, fstype = None):
204 self.diskmount = DiskMount(SparseLoopbackDisk(lofile,size),mountdir,fstype,rmmountdir = True)
205
206 def expand(self, create = False, size = None):
207 self.diskmount.disk.expand(create, size)
208
209 def truncate(self, size = None):
210 self.diskmount.disk.truncate(size)
211
212 def create(self):
213 self.diskmount.disk.create()
214
215class SparseExtLoopbackMount(SparseLoopbackMount):
216 """SparseExtLoopbackMount compatibility layer for old API"""
217 def __init__(self, lofile, mountdir, size, fstype, blocksize, fslabel):
218 self.diskmount = ExtDiskMount(SparseLoopbackDisk(lofile,size), mountdir, fstype, blocksize, fslabel, rmmountdir = True)
219
220    # NOTE: the wrapped methods below are private on ExtDiskMount, hence the name-mangled calls
221    def __format_filesystem(self):
222        self.diskmount._ExtDiskMount__format_filesystem()
223
224    def create(self):
225        self.diskmount.disk.create()
226
227    def resize(self, size = None):
228        return self.diskmount._ExtDiskMount__resize_filesystem(size)
229
230    def mount(self):
231        self.diskmount.mount()
232
233    def __fsck(self):
234        self.diskmount._ExtDiskMount__fsck()
235
236    def __get_size_from_filesystem(self):
237        return self.diskmount._ExtDiskMount__get_size_from_filesystem()
238
239    def __resize_to_minimal(self):
240        return self.diskmount._ExtDiskMount__resize_to_minimal()
241
242 def resparse(self, size = None):
243 return self.diskmount.resparse(size)
244
245class Disk:
246 """Generic base object for a disk
247
248    The 'create' method must make the disk visible as a block device - e.g.
249 by calling losetup. For RawDisk, this is obviously a no-op. The 'cleanup'
250 method must undo the 'create' operation.
251 """
252 def __init__(self, size, device = None):
253 self._device = device
254 self._size = size
255
256 def create(self):
257 pass
258
259 def cleanup(self):
260 pass
261
262 def get_device(self):
263 return self._device
264 def set_device(self, path):
265 self._device = path
266 device = property(get_device, set_device)
267
268 def get_size(self):
269 return self._size
270 size = property(get_size)
271
272
273class RawDisk(Disk):
274 """A Disk backed by a block device.
275 Note that create() is a no-op.
276 """
277 def __init__(self, size, device):
278 Disk.__init__(self, size, device)
279
280 def fixed(self):
281 return True
282
283 def exists(self):
284 return True
285
286
287class DiskImage(Disk):
288 """
289 A Disk backed by a file.
290 """
291 def __init__(self, image_file, size):
292 Disk.__init__(self, size)
293 self.image_file = image_file
294
295 def exists(self):
296 return os.path.exists(self.image_file)
297
298 def create(self):
299 if self.device is not None:
300 return
301
302 blocks = self.size / 1024
303 if self.size - blocks * 1024:
304 blocks += 1
305
306 # create disk image
307 dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=1" % \
308 (self.image_file, blocks)
309 rc, out = exec_cmd(dd_cmd)
310
311 self.device = self.image_file
312
313
314class LoopbackDisk(Disk):
315 """A Disk backed by a file via the loop module."""
316 def __init__(self, lofile, size):
317 Disk.__init__(self, size)
318 self.lofile = lofile
319 self.losetupcmd = find_binary_path("losetup")
320
321 def fixed(self):
322 return False
323
324 def exists(self):
325 return os.path.exists(self.lofile)
326
327 def create(self):
328 if self.device is not None:
329 return
330
331 self.device = get_loop_device(self.losetupcmd, self.lofile)
332
333 def cleanup(self):
334 if self.device is None:
335 return
336 msger.debug("Losetup remove %s" % self.device)
337 rc = runner.show([self.losetupcmd, "-d", self.device])
338 self.device = None
339
340class SparseLoopbackDisk(LoopbackDisk):
341 """A Disk backed by a sparse file via the loop module."""
342 def __init__(self, lofile, size):
343 LoopbackDisk.__init__(self, lofile, size)
344
345 def expand(self, create = False, size = None):
346 flags = os.O_WRONLY
347 if create:
348 flags |= os.O_CREAT
349 if not os.path.exists(self.lofile):
350 makedirs(os.path.dirname(self.lofile))
351
352 if size is None:
353 size = self.size
354
355 msger.debug("Extending sparse file %s to %d" % (self.lofile, size))
356 if create:
357 fd = os.open(self.lofile, flags, 0644)
358 else:
359 fd = os.open(self.lofile, flags)
360
361 if size <= 0:
362 size = 1
363 try:
364 os.ftruncate(fd, size)
365 except:
366            # may be limited to 2G in a 32-bit environment
367 os.ftruncate(fd, 2**31L)
368
369 os.close(fd)
370
371 def truncate(self, size = None):
372 if size is None:
373 size = self.size
374
375 msger.debug("Truncating sparse file %s to %d" % (self.lofile, size))
376 fd = os.open(self.lofile, os.O_WRONLY)
377 os.ftruncate(fd, size)
378 os.close(fd)
379
380 def create(self):
381 self.expand(create = True)
382 LoopbackDisk.create(self)
383
384class Mount:
385 """A generic base class to deal with mounting things."""
386 def __init__(self, mountdir):
387 self.mountdir = mountdir
388
389 def cleanup(self):
390 self.unmount()
391
392 def mount(self, options = None):
393 pass
394
395 def unmount(self):
396 pass
397
398class DiskMount(Mount):
399 """A Mount object that handles mounting of a Disk."""
400 def __init__(self, disk, mountdir, fstype = None, rmmountdir = True):
401 Mount.__init__(self, mountdir)
402
403 self.disk = disk
404 self.fstype = fstype
405 self.rmmountdir = rmmountdir
406
407 self.mounted = False
408 self.rmdir = False
409 if fstype:
410 self.mkfscmd = find_binary_path("mkfs." + self.fstype)
411 else:
412 self.mkfscmd = None
413 self.mountcmd = find_binary_path("mount")
414 self.umountcmd = find_binary_path("umount")
415
416 def cleanup(self):
417 Mount.cleanup(self)
418 self.disk.cleanup()
419
420 def unmount(self):
421 if self.mounted:
422 msger.debug("Unmounting directory %s" % self.mountdir)
423 runner.quiet('sync') # sync the data on this mount point
424 rc = runner.show([self.umountcmd, "-l", self.mountdir])
425 if rc == 0:
426 self.mounted = False
427 else:
428 raise MountError("Failed to umount %s" % self.mountdir)
429 if self.rmdir and not self.mounted:
430 try:
431 os.rmdir(self.mountdir)
432 except OSError, e:
433 pass
434 self.rmdir = False
435
436
437 def __create(self):
438 self.disk.create()
439
440
441 def mount(self, options = None):
442 if self.mounted:
443 return
444
445 if not os.path.isdir(self.mountdir):
446 msger.debug("Creating mount point %s" % self.mountdir)
447 os.makedirs(self.mountdir)
448 self.rmdir = self.rmmountdir
449
450 self.__create()
451
452 msger.debug("Mounting %s at %s" % (self.disk.device, self.mountdir))
453 if options:
454 args = [ self.mountcmd, "-o", options, self.disk.device, self.mountdir ]
455 else:
456 args = [ self.mountcmd, self.disk.device, self.mountdir ]
457 if self.fstype:
458 args.extend(["-t", self.fstype])
459
460 rc = runner.show(args)
461 if rc != 0:
462 raise MountError("Failed to mount '%s' to '%s' with command '%s'. Retval: %s" %
463 (self.disk.device, self.mountdir, " ".join(args), rc))
464
465 self.mounted = True
466
467class ExtDiskMount(DiskMount):
468 """A DiskMount object that is able to format/resize ext[23] filesystems."""
469 def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
470 DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
471 self.blocksize = blocksize
472 self.fslabel = fslabel.replace("/", "")
473 self.uuid = str(uuid.uuid4())
474 self.skipformat = skipformat
475 self.fsopts = fsopts
476 self.extopts = None
477 self.dumpe2fs = find_binary_path("dumpe2fs")
478 self.tune2fs = find_binary_path("tune2fs")
479
480 def __parse_field(self, output, field):
481 for line in output.split("\n"):
482 if line.startswith(field + ":"):
483 return line[len(field) + 1:].strip()
484
485 raise KeyError("Failed to find field '%s' in output" % field)
486
487 def __format_filesystem(self):
488 if self.skipformat:
489 msger.debug("Skip filesystem format.")
490 return
491
492        msger.verbose("Formatting %s filesystem on %s" % (self.fstype, self.disk.device))
493 cmdlist = [self.mkfscmd, "-F", "-L", self.fslabel, "-m", "1", "-b",
494 str(self.blocksize), "-U", self.uuid]
495 if self.extopts:
496 cmdlist.extend(self.extopts.split())
497 cmdlist.extend([self.disk.device])
498
499 rc, errout = runner.runtool(cmdlist, catch=2)
500 if rc != 0:
501 raise MountError("Error creating %s filesystem on disk %s:\n%s" %
502 (self.fstype, self.disk.device, errout))
503
504 if not self.extopts:
505 msger.debug("Tuning filesystem on %s" % self.disk.device)
506 runner.show([self.tune2fs, "-c0", "-i0", "-Odir_index", "-ouser_xattr,acl", self.disk.device])
507
508 def __resize_filesystem(self, size = None):
509 current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
510
511 if size is None:
512 size = self.disk.size
513
514 if size == current_size:
515 return
516
517 if size > current_size:
518            self.disk.expand(size = size)
519
520 self.__fsck()
521
522 resize2fs(self.disk.lofile, size)
523 return size
524
525 def __create(self):
526 resize = False
527 if not self.disk.fixed() and self.disk.exists():
528 resize = True
529
530 self.disk.create()
531
532 if resize:
533 self.__resize_filesystem()
534 else:
535 self.__format_filesystem()
536
537 def mount(self, options = None):
538 self.__create()
539 DiskMount.mount(self, options)
540
541 def __fsck(self):
542 msger.info("Checking filesystem %s" % self.disk.lofile)
543 runner.quiet(["/sbin/e2fsck", "-f", "-y", self.disk.lofile])
544
545 def __get_size_from_filesystem(self):
546 return int(self.__parse_field(runner.outs([self.dumpe2fs, '-h', self.disk.lofile]),
547 "Block count")) * self.blocksize
548
549 def __resize_to_minimal(self):
550 self.__fsck()
551
552 #
553 # Use a binary search to find the minimal size
554 # we can resize the image to
555 #
556 bot = 0
557 top = self.__get_size_from_filesystem()
558 while top != (bot + 1):
559 t = bot + ((top - bot) / 2)
560
561 if not resize2fs(self.disk.lofile, t):
562 top = t
563 else:
564 bot = t
565 return top
566
567 def resparse(self, size = None):
568 self.cleanup()
569 if size == 0:
570 minsize = 0
571 else:
572 minsize = self.__resize_to_minimal()
573 self.disk.truncate(minsize)
574
575 self.__resize_filesystem(size)
576 return minsize
577
578class VfatDiskMount(DiskMount):
579 """A DiskMount object that is able to format vfat/msdos filesystems."""
580 def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
581 DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
582 self.blocksize = blocksize
583 self.fslabel = fslabel.replace("/", "")
584 rand1 = random.randint(0, 2**16 - 1)
585 rand2 = random.randint(0, 2**16 - 1)
586 self.uuid = "%04X-%04X" % (rand1, rand2)
587 self.skipformat = skipformat
588 self.fsopts = fsopts
589 self.fsckcmd = find_binary_path("fsck." + self.fstype)
590
591 def __format_filesystem(self):
592 if self.skipformat:
593 msger.debug("Skip filesystem format.")
594 return
595
596        msger.verbose("Formatting %s filesystem on %s" % (self.fstype, self.disk.device))
597 rc = runner.show([self.mkfscmd, "-n", self.fslabel,
598 "-i", self.uuid.replace("-", ""), self.disk.device])
599 if rc != 0:
600 raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))
601
602 msger.verbose("Tuning filesystem on %s" % self.disk.device)
603
604 def __resize_filesystem(self, size = None):
605 current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
606
607 if size is None:
608 size = self.disk.size
609
610 if size == current_size:
611 return
612
613 if size > current_size:
614            self.disk.expand(size = size)
615
616 self.__fsck()
617
618 #resize2fs(self.disk.lofile, size)
619 return size
620
621 def __create(self):
622 resize = False
623 if not self.disk.fixed() and self.disk.exists():
624 resize = True
625
626 self.disk.create()
627
628 if resize:
629 self.__resize_filesystem()
630 else:
631 self.__format_filesystem()
632
633 def mount(self, options = None):
634 self.__create()
635 DiskMount.mount(self, options)
636
637 def __fsck(self):
638 msger.debug("Checking filesystem %s" % self.disk.lofile)
639 runner.show([self.fsckcmd, "-y", self.disk.lofile])
640
641 def __get_size_from_filesystem(self):
642 return self.disk.size
643
644 def __resize_to_minimal(self):
645 self.__fsck()
646
647        #
648        # vfat filesystems are not resized here, so the current
649        # filesystem size already is the minimal size
650        #
651
652 top = self.__get_size_from_filesystem()
653 return top
654
655 def resparse(self, size = None):
656 self.cleanup()
657 minsize = self.__resize_to_minimal()
658 self.disk.truncate(minsize)
659 self.__resize_filesystem(size)
660 return minsize
661
662class BtrfsDiskMount(DiskMount):
663 """A DiskMount object that is able to format/resize btrfs filesystems."""
664 def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
665 self.__check_btrfs()
666 DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
667 self.blocksize = blocksize
668 self.fslabel = fslabel.replace("/", "")
669 self.uuid = None
670 self.skipformat = skipformat
671 self.fsopts = fsopts
672 self.blkidcmd = find_binary_path("blkid")
673 self.btrfsckcmd = find_binary_path("btrfsck")
674
675 def __check_btrfs(self):
676        """ Need to load the btrfs module to mount it """
677        found = False
678 load_module("btrfs")
679 for line in open("/proc/filesystems").xreadlines():
680 if line.find("btrfs") > -1:
681 found = True
682 break
683 if not found:
684            raise MountError("Your system can't mount btrfs filesystems; please make sure your kernel has btrfs support and that the btrfs.ko module has been loaded.")
685
686        # disable SELinux, since it would block writes
687 if os.path.exists("/usr/sbin/setenforce"):
688 runner.show(["/usr/sbin/setenforce", "0"])
689
690 def __parse_field(self, output, field):
691 for line in output.split(" "):
692 if line.startswith(field + "="):
693 return line[len(field) + 1:].strip().replace("\"", "")
694
695 raise KeyError("Failed to find field '%s' in output" % field)
696
697 def __format_filesystem(self):
698 if self.skipformat:
699 msger.debug("Skip filesystem format.")
700 return
701
702        msger.verbose("Formatting %s filesystem on %s" % (self.fstype, self.disk.device))
703 rc = runner.show([self.mkfscmd, "-L", self.fslabel, self.disk.device])
704 if rc != 0:
705 raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))
706
707 self.uuid = self.__parse_field(runner.outs([self.blkidcmd, self.disk.device]), "UUID")
708
709 def __resize_filesystem(self, size = None):
710 current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
711
712 if size is None:
713 size = self.disk.size
714
715 if size == current_size:
716 return
717
718 if size > current_size:
719            self.disk.expand(size = size)
720
721 self.__fsck()
722 return size
723
724 def __create(self):
725 resize = False
726 if not self.disk.fixed() and self.disk.exists():
727 resize = True
728
729 self.disk.create()
730
731 if resize:
732 self.__resize_filesystem()
733 else:
734 self.__format_filesystem()
735
736 def mount(self, options = None):
737 self.__create()
738 DiskMount.mount(self, options)
739
740 def __fsck(self):
741 msger.debug("Checking filesystem %s" % self.disk.lofile)
742 runner.quiet([self.btrfsckcmd, self.disk.lofile])
743
744 def __get_size_from_filesystem(self):
745 return self.disk.size
746
747 def __resize_to_minimal(self):
748 self.__fsck()
749
750 return self.__get_size_from_filesystem()
751
752 def resparse(self, size = None):
753 self.cleanup()
754 minsize = self.__resize_to_minimal()
755 self.disk.truncate(minsize)
756 self.__resize_filesystem(size)
757 return minsize
758
759class DeviceMapperSnapshot(object):
760 def __init__(self, imgloop, cowloop):
761 self.imgloop = imgloop
762 self.cowloop = cowloop
763
764 self.__created = False
765 self.__name = None
766 self.dmsetupcmd = find_binary_path("dmsetup")
767
768 """Load dm_snapshot if it isn't loaded"""
769 load_module("dm_snapshot")
770
771 def get_path(self):
772 if self.__name is None:
773 return None
774 return os.path.join("/dev/mapper", self.__name)
775 path = property(get_path)
776
777 def create(self):
778 if self.__created:
779 return
780
781 self.imgloop.create()
782 self.cowloop.create()
783
784 self.__name = "imgcreate-%d-%d" % (os.getpid(),
785 random.randint(0, 2**16))
786
787 size = os.stat(self.imgloop.lofile)[stat.ST_SIZE]
788
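        # device-mapper "snapshot" table: <start> <length in 512-byte
        # sectors> snapshot <origin dev> <COW dev> p <chunk size>, where
        # "p" makes the snapshot persistent and 8 is the chunk size in
        # sectors (see the dm-snapshot kernel documentation)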
789 table = "0 %d snapshot %s %s p 8" % (size / 512,
790 self.imgloop.device,
791 self.cowloop.device)
792
793 args = [self.dmsetupcmd, "create", self.__name, "--table", table]
794 if runner.show(args) != 0:
795 self.cowloop.cleanup()
796 self.imgloop.cleanup()
797 raise SnapshotError("Could not create snapshot device using: " + ' '.join(args))
798
799 self.__created = True
800
801 def remove(self, ignore_errors = False):
802 if not self.__created:
803 return
804
805 time.sleep(2)
806 rc = runner.show([self.dmsetupcmd, "remove", self.__name])
807 if not ignore_errors and rc != 0:
808 raise SnapshotError("Could not remove snapshot device")
809
810 self.__name = None
811 self.__created = False
812
813 self.cowloop.cleanup()
814 self.imgloop.cleanup()
815
816 def get_cow_used(self):
817 if not self.__created:
818 return 0
819
820 #
821 # dmsetup status on a snapshot returns e.g.
822 # "0 8388608 snapshot 416/1048576"
823 # or, more generally:
824 # "A B snapshot C/D"
825 # where C is the number of 512 byte sectors in use
826 #
827 out = runner.outs([self.dmsetupcmd, "status", self.__name])
828 try:
829 return int((out.split()[3]).split('/')[0]) * 512
830 except ValueError:
831 raise SnapshotError("Failed to parse dmsetup status: " + out)
832
833def create_image_minimizer(path, image, minimal_size):
834 """
835 Builds a copy-on-write image which can be used to
836 create a device-mapper snapshot of an image where
837 the image's filesystem is as small as possible
838
839 The steps taken are:
840 1) Create a sparse COW
841 2) Loopback mount the image and the COW
842 3) Create a device-mapper snapshot of the image
843 using the COW
844 4) Resize the filesystem to the minimal size
845 5) Determine the amount of space used in the COW
846    6) Destroy the device-mapper snapshot
847 7) Truncate the COW, removing unused space
848 8) Create a squashfs of the COW
849 """
850 imgloop = LoopbackDisk(image, None) # Passing bogus size - doesn't matter
851
852 cowloop = SparseLoopbackDisk(os.path.join(os.path.dirname(path), "osmin"),
853 64L * 1024L * 1024L)
854
855 snapshot = DeviceMapperSnapshot(imgloop, cowloop)
856
857 try:
858 snapshot.create()
859
860 resize2fs(snapshot.path, minimal_size)
861
862 cow_used = snapshot.get_cow_used()
863 finally:
864        snapshot.remove(ignore_errors = (sys.exc_info()[0] is not None))
865
866 cowloop.truncate(cow_used)
867
868 mksquashfs(cowloop.lofile, path)
869
870 os.unlink(cowloop.lofile)
871
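# A usage sketch (paths and the size are hypothetical): shrink the
# filesystem in root.img down to roughly 200MiB and store the resulting
# COW as a squashfs at the given path; the intermediate "osmin" COW file
# is created next to it and deleted afterwards.
#
#     create_image_minimizer("/tmp/out/osmin.img", "/tmp/out/root.img",
#                            200L * 1024L * 1024L)
#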
872def load_module(module):
873 found = False
874 for line in open('/proc/modules').xreadlines():
875 if line.startswith("%s " % module):
876 found = True
877 break
878 if not found:
879 msger.info("Loading %s..." % module)
880 runner.quiet(['modprobe', module])
881
882class LoopDevice(object):
883 def __init__(self, loopid=None):
884 self.device = None
885 self.loopid = loopid
886 self.created = False
887 self.kpartxcmd = find_binary_path("kpartx")
888 self.losetupcmd = find_binary_path("losetup")
889
890 def register(self, device):
891 self.device = device
892 self.loopid = None
893 self.created = True
894
895 def reg_atexit(self):
896 import atexit
897 atexit.register(self.close)
898
899 def _genloopid(self):
900 import glob
901 if not glob.glob("/dev/loop[0-9]*"):
902 return 10
903
904 fint = lambda x: x[9:].isdigit() and int(x[9:]) or 0
905 maxid = 1 + max(filter(lambda x: x<100,
906 map(fint, glob.glob("/dev/loop[0-9]*"))))
907 if maxid < 10: maxid = 10
908        if maxid >= 100: raise MountError("Exhausted loop device ids (max 100)")
909 return maxid
910
911 def _kpseek(self, device):
912 rc, out = runner.runtool([self.kpartxcmd, '-l', '-v', device])
913 if rc != 0:
914 raise MountError("Can't query dm snapshot on %s" % device)
915 for line in out.splitlines():
916 if line and line.startswith("loop"):
917 return True
918 return False
919
920 def _loseek(self, device):
921 import re
922 rc, out = runner.runtool([self.losetupcmd, '-a'])
923 if rc != 0:
924 raise MountError("Failed to run 'losetup -a'")
925 for line in out.splitlines():
926 m = re.match("([^:]+): .*", line)
927 if m and m.group(1) == device:
928 return True
929 return False
930
931 def create(self):
932 if not self.created:
933 if not self.loopid:
934 self.loopid = self._genloopid()
935 self.device = "/dev/loop%d" % self.loopid
936 if os.path.exists(self.device):
937 if self._loseek(self.device):
938 raise MountError("Device busy: %s" % self.device)
939 else:
940 self.created = True
941 return
942
943 mknod = find_binary_path('mknod')
944 rc = runner.show([mknod, '-m664', self.device, 'b', '7', str(self.loopid)])
945 if rc != 0:
946 raise MountError("Failed to create device %s" % self.device)
947 else:
948 self.created = True
949
950 def close(self):
951 if self.created:
952 try:
953 self.cleanup()
954 self.device = None
955 except MountError, e:
956 msger.error("%s" % e)
957
958 def cleanup(self):
959
960 if self.device is None:
961 return
962
963
964 if self._kpseek(self.device):
965 if self.created:
966 for i in range(3, os.sysconf("SC_OPEN_MAX")):
967 try:
968 os.close(i)
969 except:
970 pass
971 runner.quiet([self.kpartxcmd, "-d", self.device])
972 if self._loseek(self.device):
973 runner.quiet([self.losetupcmd, "-d", self.device])
974                # FIXME: should sleep a while between the two _loseek calls
975 if self._loseek(self.device):
976 msger.warning("Can't cleanup loop device %s" % self.device)
977 elif self.loopid:
978 os.unlink(self.device)
979
980DEVICE_PIDFILE_DIR = "/var/tmp/mic/device"
981DEVICE_LOCKFILE = "/var/lock/__mic_loopdev.lock"
982
983def get_loop_device(losetupcmd, lofile):
984 global DEVICE_PIDFILE_DIR
985 global DEVICE_LOCKFILE
986
987 import fcntl
988 makedirs(os.path.dirname(DEVICE_LOCKFILE))
989 fp = open(DEVICE_LOCKFILE, 'w')
990 fcntl.flock(fp, fcntl.LOCK_EX)
991 try:
992 loopdev = None
993 devinst = LoopDevice()
994
995 # clean up left loop device first
996 clean_loop_devices()
997
998        # provide an available loop device
999 rc, out = runner.runtool([losetupcmd, "--find"])
1000 if rc == 0:
1001 loopdev = out.split()[0]
1002 devinst.register(loopdev)
1003 if not loopdev or not os.path.exists(loopdev):
1004 devinst.create()
1005 loopdev = devinst.device
1006
1007 # setup a loop device for image file
1008 rc = runner.show([losetupcmd, loopdev, lofile])
1009 if rc != 0:
1010 raise MountError("Failed to setup loop device for '%s'" % lofile)
1011
1012 devinst.reg_atexit()
1013
1014 # try to save device and pid
1015 makedirs(DEVICE_PIDFILE_DIR)
1016 pidfile = os.path.join(DEVICE_PIDFILE_DIR, os.path.basename(loopdev))
1017 if os.path.exists(pidfile):
1018 os.unlink(pidfile)
1019 with open(pidfile, 'w') as wf:
1020 wf.write(str(os.getpid()))
1021
1022 except MountError, err:
1023 raise CreatorError("%s" % str(err))
1024 except:
1025 raise
1026 finally:
1027 try:
1028 fcntl.flock(fp, fcntl.LOCK_UN)
1029 fp.close()
1030 os.unlink(DEVICE_LOCKFILE)
1031 except:
1032 pass
1033
1034 return loopdev
1035
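# A usage sketch (the image path is hypothetical): attach an image file to
# a free loop device under the global lock file.
#
#     losetupcmd = find_binary_path("losetup")
#     loopdev = get_loop_device(losetupcmd, "/tmp/out/root.img")
#
# The device is detached automatically at exit via reg_atexit(), and
# clean_loop_devices() below reclaims devices left behind by dead processes.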
1036def clean_loop_devices(piddir=DEVICE_PIDFILE_DIR):
1037 if not os.path.exists(piddir) or not os.path.isdir(piddir):
1038 return
1039
1040 for loopdev in os.listdir(piddir):
1041 pidfile = os.path.join(piddir, loopdev)
1042 try:
1043 with open(pidfile, 'r') as rf:
1044 devpid = int(rf.read())
1045 except:
1046 devpid = None
1047
1048 # if the process using this device is alive, skip it
1049 if not devpid or os.path.exists(os.path.join('/proc', str(devpid))):
1050 continue
1051
1052 # try to clean it up
1053 try:
1054 devinst = LoopDevice()
1055 devinst.register(os.path.join('/dev', loopdev))
1056 devinst.cleanup()
1057 os.unlink(pidfile)
1058 except:
1059 pass
1060
diff --git a/scripts/lib/mic/utils/gpt_parser.py b/scripts/lib/mic/utils/gpt_parser.py
new file mode 100644
index 0000000000..5d43b70778
--- /dev/null
+++ b/scripts/lib/mic/utils/gpt_parser.py
@@ -0,0 +1,331 @@
1#!/usr/bin/python -tt
2#
3# Copyright (c) 2013 Intel, Inc.
4#
5# This program is free software; you can redistribute it and/or modify it
6# under the terms of the GNU General Public License as published by the Free
7# Software Foundation; version 2 of the License
8#
9# This program is distributed in the hope that it will be useful, but
10# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12# for more details.
13#
14# You should have received a copy of the GNU General Public License along
15# with this program; if not, write to the Free Software Foundation, Inc., 59
16# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18""" This module implements a simple GPT partitions parser which can read the
19GPT header and the GPT partition table. """
20
21import struct
22import uuid
23import binascii
24from mic.utils.errors import MountError
25
26_GPT_HEADER_FORMAT = "<8s4sIIIQQQQ16sQIII"
27_GPT_HEADER_SIZE = struct.calcsize(_GPT_HEADER_FORMAT)
28_GPT_ENTRY_FORMAT = "<16s16sQQQ72s"
29_GPT_ENTRY_SIZE = struct.calcsize(_GPT_ENTRY_FORMAT)
30_SUPPORTED_GPT_REVISION = '\x00\x00\x01\x00'
31
32def _stringify_uuid(binary_uuid):
33 """ A small helper function to transform a binary UUID into a string
34 format. """
35
36 uuid_str = str(uuid.UUID(bytes_le = binary_uuid))
37
38 return uuid_str.upper()
39
40def _calc_header_crc(raw_hdr):
41 """ Calculate GPT header CRC32 checksum. The 'raw_hdr' parameter has to
42 be a list or a tuple containing all the elements of the GPT header in a
43 "raw" form, meaning that it should simply contain "unpacked" disk data.
44 """
45
46 raw_hdr = list(raw_hdr)
47 raw_hdr[3] = 0
48 raw_hdr = struct.pack(_GPT_HEADER_FORMAT, *raw_hdr)
49
50 return binascii.crc32(raw_hdr) & 0xFFFFFFFF
51
52def _validate_header(raw_hdr):
53 """ Validate the GPT header. The 'raw_hdr' parameter has to be a list or a
54 tuple containing all the elements of the GPT header in a "raw" form,
55 meaning that it should simply contain "unpacked" disk data. """
56
57 # Validate the signature
58 if raw_hdr[0] != 'EFI PART':
59 raise MountError("GPT partition table not found")
60
61 # Validate the revision
62 if raw_hdr[1] != _SUPPORTED_GPT_REVISION:
63 raise MountError("Unsupported GPT revision '%s', supported revision " \
64 "is '%s'" % \
65 (binascii.hexlify(raw_hdr[1]),
66 binascii.hexlify(_SUPPORTED_GPT_REVISION)))
67
68 # Validate header size
69 if raw_hdr[2] != _GPT_HEADER_SIZE:
70 raise MountError("Bad GPT header size: %d bytes, expected %d" % \
71 (raw_hdr[2], _GPT_HEADER_SIZE))
72
73 crc = _calc_header_crc(raw_hdr)
74 if raw_hdr[3] != crc:
75 raise MountError("GPT header crc mismatch: %#x, should be %#x" % \
76 (crc, raw_hdr[3]))
77
78class GptParser:
79 """ GPT partition table parser. Allows reading the GPT header and the
80 partition table, as well as modifying the partition table records. """
81
82 def __init__(self, disk_path, sector_size = 512):
83 """ The class constructor which accepts the following parameters:
84 * disk_path - full path to the disk image or device node
85 * sector_size - size of a disk sector in bytes """
86
87 self.sector_size = sector_size
88 self.disk_path = disk_path
89
90 try:
91 self._disk_obj = open(disk_path, 'r+b')
92 except IOError as err:
93 raise MountError("Cannot open file '%s' for reading GPT " \
94 "partitions: %s" % (disk_path, err))
95
96 def __del__(self):
97 """ The class destructor. """
98
99 self._disk_obj.close()
100
101 def _read_disk(self, offset, size):
102 """ A helper function which reads 'size' bytes from offset 'offset' of
103 the disk and checks all the error conditions. """
104
105 self._disk_obj.seek(offset)
106 try:
107 data = self._disk_obj.read(size)
108 except IOError as err:
109 raise MountError("cannot read from '%s': %s" % \
110 (self.disk_path, err))
111
112 if len(data) != size:
113 raise MountError("cannot read %d bytes from offset '%d' of '%s', " \
114 "read only %d bytes" % \
115 (size, offset, self.disk_path, len(data)))
116
117 return data
118
119 def _write_disk(self, offset, buf):
120 """ A helper function which writes buffer 'buf' to offset 'offset' of
121 the disk. This function takes care of unaligned writes and checks all
122 the error conditions. """
123
124        # Since we may be dealing with a block device, we can only write in
125 # 'self.sector_size' chunks. Find the aligned starting and ending
126 # disk offsets to read.
127 start = (offset / self.sector_size) * self.sector_size
128 end = ((start + len(buf)) / self.sector_size + 1) * self.sector_size
129
130 data = self._read_disk(start, end - start)
131 off = offset - start
132 data = data[:off] + buf + data[off + len(buf):]
133
134 self._disk_obj.seek(start)
135 try:
136 self._disk_obj.write(data)
137 except IOError as err:
138 raise MountError("cannot write to '%s': %s" % (self.disk_path, err))
139
140 def read_header(self, primary = True):
141 """ Read and verify the GPT header and return a dictionary containing
142 the following elements:
143
144 'signature' : header signature
145 'revision' : header revision
146 'hdr_size' : header size in bytes
147 'hdr_crc' : header CRC32
148 'hdr_lba' : LBA of this header
149 'hdr_offs' : byte disk offset of this header
150 'backup_lba' : backup header LBA
151 'backup_offs' : byte disk offset of backup header
152 'first_lba' : first usable LBA for partitions
153 'first_offs' : first usable byte disk offset for partitions
154 'last_lba' : last usable LBA for partitions
155 'last_offs' : last usable byte disk offset for partitions
156 'disk_uuid' : UUID of the disk
157 'ptable_lba' : starting LBA of array of partition entries
158 'ptable_offs' : disk byte offset of the start of the partition table
159 'ptable_size' : partition table size in bytes
160 'entries_cnt' : number of available partition table entries
161 'entry_size' : size of a single partition entry
162 'ptable_crc' : CRC32 of the partition table
163 'primary' : a boolean, if 'True', this is the primary GPT header,
164 if 'False' - the secondary
165 'primary_str' : contains string "primary" if this is the primary GPT
166 header, and "backup" otherwise
167
168 This dictionary corresponds to the GPT header format. Please, see the
169 UEFI standard for the description of these fields.
170
171 If the 'primary' parameter is 'True', the primary GPT header is read,
172 otherwise the backup GPT header is read instead. """
173
174 # Read and validate the primary GPT header
175 raw_hdr = self._read_disk(self.sector_size, _GPT_HEADER_SIZE)
176 raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
177 _validate_header(raw_hdr)
178 primary_str = "primary"
179
180 if not primary:
181 # Read and validate the backup GPT header
182 raw_hdr = self._read_disk(raw_hdr[6] * self.sector_size, _GPT_HEADER_SIZE)
183 raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
184 _validate_header(raw_hdr)
185 primary_str = "backup"
186
187 return { 'signature' : raw_hdr[0],
188 'revision' : raw_hdr[1],
189 'hdr_size' : raw_hdr[2],
190 'hdr_crc' : raw_hdr[3],
191 'hdr_lba' : raw_hdr[5],
192 'hdr_offs' : raw_hdr[5] * self.sector_size,
193 'backup_lba' : raw_hdr[6],
194 'backup_offs' : raw_hdr[6] * self.sector_size,
195 'first_lba' : raw_hdr[7],
196 'first_offs' : raw_hdr[7] * self.sector_size,
197 'last_lba' : raw_hdr[8],
198 'last_offs' : raw_hdr[8] * self.sector_size,
199 'disk_uuid' :_stringify_uuid(raw_hdr[9]),
200 'ptable_lba' : raw_hdr[10],
201 'ptable_offs' : raw_hdr[10] * self.sector_size,
202 'ptable_size' : raw_hdr[11] * raw_hdr[12],
203 'entries_cnt' : raw_hdr[11],
204 'entry_size' : raw_hdr[12],
205 'ptable_crc' : raw_hdr[13],
206 'primary' : primary,
207 'primary_str' : primary_str }
208
209 def _read_raw_ptable(self, header):
210 """ Read and validate primary or backup partition table. The 'header'
211 argument is the GPT header. If it is the primary GPT header, then the
212 primary partition table is read and validated, otherwise - the backup
213 one. The 'header' argument is a dictionary which is returned by the
214 'read_header()' method. """
215
216 raw_ptable = self._read_disk(header['ptable_offs'],
217 header['ptable_size'])
218
219 crc = binascii.crc32(raw_ptable) & 0xFFFFFFFF
220 if crc != header['ptable_crc']:
221 raise MountError("Partition table at LBA %d (%s) is corrupted" % \
222 (header['ptable_lba'], header['primary_str']))
223
224 return raw_ptable
225
226 def get_partitions(self, primary = True):
227 """ This is a generator which parses the GPT partition table and
228 generates the following dictionary for each partition:
229
230        'index'       : the index of the partition table entry
231 'offs' : byte disk offset of the partition table entry
232 'type_uuid' : partition type UUID
233 'part_uuid' : partition UUID
234 'first_lba' : the first LBA
235 'last_lba' : the last LBA
236 'flags' : attribute flags
237 'name' : partition name
238 'primary' : a boolean, if 'True', this is the primary partition
239 table, if 'False' - the secondary
240 'primary_str' : contains string "primary" if this is the primary GPT
241 header, and "backup" otherwise
242
243 This dictionary corresponds to the GPT header format. Please, see the
244 UEFI standard for the description of these fields.
245
246 If the 'primary' parameter is 'True', partitions from the primary GPT
247 partition table are generated, otherwise partitions from the backup GPT
248 partition table are generated. """
249
250 if primary:
251 primary_str = "primary"
252 else:
253 primary_str = "backup"
254
255 header = self.read_header(primary)
256 raw_ptable = self._read_raw_ptable(header)
257
258 for index in xrange(0, header['entries_cnt']):
259 start = header['entry_size'] * index
260 end = start + header['entry_size']
261 raw_entry = struct.unpack(_GPT_ENTRY_FORMAT, raw_ptable[start:end])
262
263 if raw_entry[2] == 0 or raw_entry[3] == 0:
264 continue
265
266 part_name = str(raw_entry[5].decode('UTF-16').split('\0', 1)[0])
267
268 yield { 'index' : index,
269 'offs' : header['ptable_offs'] + start,
270 'type_uuid' : _stringify_uuid(raw_entry[0]),
271 'part_uuid' : _stringify_uuid(raw_entry[1]),
272 'first_lba' : raw_entry[2],
273 'last_lba' : raw_entry[3],
274 'flags' : raw_entry[4],
275 'name' : part_name,
276 'primary' : primary,
277 'primary_str' : primary_str }
278
279 def _change_partition(self, header, entry):
280 """ A helper function for 'change_partitions()' which changes a
281 a paricular instance of the partition table (primary or backup). """
282
283 if entry['index'] >= header['entries_cnt']:
284            raise MountError("Partition table at LBA %d has only %d " \
285                             "records, cannot change record number %d" % \
286                             (header['ptable_lba'], header['entries_cnt'], entry['index']))
287 # Read raw GPT header
288 raw_hdr = self._read_disk(header['hdr_offs'], _GPT_HEADER_SIZE)
289 raw_hdr = list(struct.unpack(_GPT_HEADER_FORMAT, raw_hdr))
290 _validate_header(raw_hdr)
291
292 # Prepare the new partition table entry
293 raw_entry = struct.pack(_GPT_ENTRY_FORMAT,
294 uuid.UUID(entry['type_uuid']).bytes_le,
295 uuid.UUID(entry['part_uuid']).bytes_le,
296 entry['first_lba'],
297 entry['last_lba'],
298 entry['flags'],
299 entry['name'].encode('UTF-16'))
300
301 # Write the updated entry to the disk
302 entry_offs = header['ptable_offs'] + \
303 header['entry_size'] * entry['index']
304 self._write_disk(entry_offs, raw_entry)
305
306 # Calculate and update partition table CRC32
307 raw_ptable = self._read_disk(header['ptable_offs'],
308 header['ptable_size'])
309 raw_hdr[13] = binascii.crc32(raw_ptable) & 0xFFFFFFFF
310
311 # Calculate and update the GPT header CRC
312 raw_hdr[3] = _calc_header_crc(raw_hdr)
313
314 # Write the updated header to the disk
315 raw_hdr = struct.pack(_GPT_HEADER_FORMAT, *raw_hdr)
316 self._write_disk(header['hdr_offs'], raw_hdr)
317
318 def change_partition(self, entry):
319 """ Change a GPT partition. The 'entry' argument has the same format as
320 'get_partitions()' returns. This function simply changes the partition
321        table record corresponding to 'entry' in both the primary and the
322        backup GPT partition tables. The partition table CRC is re-calculated
323 and the GPT headers are modified accordingly. """
324
325 # Change the primary partition table
326 header = self.read_header(True)
327 self._change_partition(header, entry)
328
329 # Change the backup partition table
330 header = self.read_header(False)
331 self._change_partition(header, entry)
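A sketch of the intended read-modify-write flow for change_partition(), under the same GptParser construction assumption; the partition name and size change below are hypothetical:

    parser = GptParser('/tmp/disk.img', 512)
    for entry in parser.get_partitions():
        if entry['name'] == 'rootfs':       # hypothetical partition name
            entry['last_lba'] += 1024       # e.g. grow by 1024 sectors
            parser.change_partition(entry)  # updates primary and backup
            break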
diff --git a/scripts/lib/mic/utils/grabber.py b/scripts/lib/mic/utils/grabber.py
new file mode 100644
index 0000000000..45e30b4fb0
--- /dev/null
+++ b/scripts/lib/mic/utils/grabber.py
@@ -0,0 +1,97 @@
1#!/usr/bin/python
2
3import os
4import sys
5import rpm
6import fcntl
7import struct
8import termios
9
10from mic import msger
11from mic.utils import runner
12from mic.utils.errors import CreatorError
13
14from urlgrabber import grabber
15from urlgrabber import __version__ as grabber_version
16
17if rpm.labelCompare(grabber_version.split('.'), '3.9.0'.split('.')) == -1:
18    msger.warning("python-urlgrabber version is %s, lower than '3.9.0'; "
19                  "you may encounter some network issues" % grabber_version)
20
21def myurlgrab(url, filename, proxies, progress_obj = None):
22 g = grabber.URLGrabber()
23 if progress_obj is None:
24 progress_obj = TextProgress()
25
26 if url.startswith("file:/"):
27 filepath = "/%s" % url.replace("file:", "").lstrip('/')
28 if not os.path.exists(filepath):
29 raise CreatorError("URLGrabber error: can't find file %s" % url)
30 if url.endswith('.rpm'):
31 return filepath
32 else:
33            # don't touch the repo metadata in the source path; copy it instead
34 runner.show(['cp', '-f', filepath, filename])
35
36 else:
37 try:
38 filename = g.urlgrab(url=str(url),
39 filename=filename,
40 ssl_verify_host=False,
41 ssl_verify_peer=False,
42 proxies=proxies,
43 http_headers=(('Pragma', 'no-cache'),),
44 quote=0,
45 progress_obj=progress_obj)
46 except grabber.URLGrabError, err:
47 msg = str(err)
48 if msg.find(url) < 0:
49 msg += ' on %s' % url
50 raise CreatorError(msg)
51
52 return filename
53
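A minimal usage sketch for myurlgrab(); the URL, destination path, and proxy are hypothetical:

    proxies = {'http': 'http://proxy.example.com:8080'}
    path = myurlgrab('http://example.com/repo/repodata/repomd.xml',
                     '/var/tmp/repomd.xml', proxies)
    # 'file:/' URLs pointing at .rpm files are returned in place, uncopied:
    path = myurlgrab('file:///srv/repo/foo.rpm', '/var/tmp/foo.rpm', None)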
54def terminal_width(fd=1):
55 """ Get the real terminal width """
56 try:
57 buf = 'abcdefgh'
58 buf = fcntl.ioctl(fd, termios.TIOCGWINSZ, buf)
59 return struct.unpack('hhhh', buf)[1]
60    except IOError:
61 return 80
62
63def truncate_url(url, width):
64 return os.path.basename(url)[0:width]
65
66class TextProgress(object):
67    # make the class a singleton
68 _instance = None
69 def __new__(cls, *args, **kwargs):
70 if not cls._instance:
71            cls._instance = super(TextProgress, cls).__new__(cls)
72
73 return cls._instance
74
75 def __init__(self, totalnum = None):
76 self.total = totalnum
77 self.counter = 1
78
79 def start(self, filename, url, *args, **kwargs):
80 self.url = url
81 self.termwidth = terminal_width()
82 msger.info("\r%-*s" % (self.termwidth, " "))
83 if self.total is None:
84 msger.info("\rRetrieving %s ..." % truncate_url(self.url, self.termwidth - 15))
85 else:
86 msger.info("\rRetrieving %s [%d/%d] ..." % (truncate_url(self.url, self.termwidth - 25), self.counter, self.total))
87
88 def update(self, *args):
89 pass
90
91 def end(self, *args):
92 if self.counter == self.total:
93 msger.raw("\n")
94
95 if self.total is not None:
96 self.counter += 1
97
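A short sketch of the singleton behaviour above; note that while __new__ caches the instance, __init__ still runs on every construction and resets 'total':

    a = TextProgress(totalnum=3)
    b = TextProgress()        # same object as 'a' ...
    assert a is b
    assert a.total is None    # ... but __init__ has reset the total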
diff --git a/scripts/lib/mic/utils/misc.py b/scripts/lib/mic/utils/misc.py
new file mode 100644
index 0000000000..95241d7f15
--- /dev/null
+++ b/scripts/lib/mic/utils/misc.py
@@ -0,0 +1,1065 @@
1#!/usr/bin/python -tt
2#
3# Copyright (c) 2010, 2011 Intel Inc.
4#
5# This program is free software; you can redistribute it and/or modify it
6# under the terms of the GNU General Public License as published by the Free
7# Software Foundation; version 2 of the License
8#
9# This program is distributed in the hope that it will be useful, but
10# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12# for more details.
13#
14# You should have received a copy of the GNU General Public License along
15# with this program; if not, write to the Free Software Foundation, Inc., 59
16# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18import os
19import sys
20import time
21import tempfile
22import re
23import shutil
24import glob
25import hashlib
26import subprocess
27import platform
28import traceback
29
30
31try:
32 import sqlite3 as sqlite
33except ImportError:
34 import sqlite
35
36try:
37 from xml.etree import cElementTree
38except ImportError:
39 import cElementTree
40xmlparse = cElementTree.parse
41
42from mic import msger
43from mic.utils.errors import CreatorError, SquashfsError
44from mic.utils.fs_related import find_binary_path, makedirs
45from mic.utils.proxy import get_proxy_for
46from mic.utils import runner
47from mic.utils import rpmmisc
48from mic.utils.grabber import myurlgrab
49RPM_RE = re.compile("(.*)\.(.*) (.*)-(.*)")
50RPM_FMT = "%(name)s.%(arch)s %(version)s-%(release)s"
51SRPM_RE = re.compile("(.*)-(\d+.*)-(\d+\.\d+).src.rpm")
52
53
54def build_name(kscfg, release=None, prefix = None, suffix = None):
55 """Construct and return an image name string.
56
57 This is a utility function to help create sensible name and fslabel
58 strings. The name is constructed using the sans-prefix-and-extension
59 kickstart filename and the supplied prefix and suffix.
60
61 kscfg -- a path to a kickstart file
62 release -- a replacement to suffix for image release
63 prefix -- a prefix to prepend to the name; defaults to None, which causes
64 no prefix to be used
65 suffix -- a suffix to append to the name; defaults to None, which causes
66 a YYYYMMDDHHMM suffix to be used
67
68    Note, if maxlen is less than len(suffix), you get to keep both pieces.
69
70 """
71 name = os.path.basename(kscfg)
72 idx = name.rfind('.')
73 if idx >= 0:
74 name = name[:idx]
75
76 if release is not None:
77 suffix = ""
78 if prefix is None:
79 prefix = ""
80 if suffix is None:
81 suffix = time.strftime("%Y%m%d%H%M")
82
83 if name.startswith(prefix):
84 name = name[len(prefix):]
85
86 prefix = "%s-" % prefix if prefix else ""
87 suffix = "-%s" % suffix if suffix else ""
88
89 ret = prefix + name + suffix
90 return ret
91
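Two hedged examples of the resulting names; the kickstart path is hypothetical and the timestamp depends on the current time:

    build_name('/path/to/tizen-mobile.ks')
    # -> 'tizen-mobile-YYYYMMDDHHMM' (timestamp suffix by default)
    build_name('/path/to/tizen-mobile.ks', release='1.0', prefix='test')
    # -> 'test-tizen-mobile' (a release empties the suffix)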
92def get_distro():
93    """Detect the Linux distribution, with support for extras such as "meego"
94 """
95
96 support_dists = ('SuSE',
97 'debian',
98 'fedora',
99 'redhat',
100 'centos',
101 'meego',
102 'moblin',
103 'tizen')
104 try:
105 (dist, ver, id) = platform.linux_distribution( \
106 supported_dists = support_dists)
107 except:
108 (dist, ver, id) = platform.dist( \
109 supported_dists = support_dists)
110
111 return (dist, ver, id)
112
113def get_distro_str():
114    """Get a composite string for the current Linux distribution
115 """
116 (dist, ver, id) = get_distro()
117
118 if not dist:
119 return 'Unknown Linux Distro'
120 else:
121 distro_str = ' '.join(map(str.strip, (dist, ver, id)))
122 return distro_str.strip()
123
124_LOOP_RULE_PTH = None
125
126def hide_loopdev_presentation():
127 udev_rules = "80-prevent-loop-present.rules"
128 udev_rules_dir = [
129 '/usr/lib/udev/rules.d/',
130 '/lib/udev/rules.d/',
131 '/etc/udev/rules.d/'
132 ]
133
134 global _LOOP_RULE_PTH
135
136 for rdir in udev_rules_dir:
137 if os.path.exists(rdir):
138 _LOOP_RULE_PTH = os.path.join(rdir, udev_rules)
139
140 if not _LOOP_RULE_PTH:
141 return
142
143 try:
144 with open(_LOOP_RULE_PTH, 'w') as wf:
145 wf.write('KERNEL=="loop*", ENV{UDISKS_PRESENTATION_HIDE}="1"')
146
147 runner.quiet('udevadm trigger')
148 except:
149 pass
150
151def unhide_loopdev_presentation():
152 global _LOOP_RULE_PTH
153
154 if not _LOOP_RULE_PTH:
155 return
156
157 try:
158 os.unlink(_LOOP_RULE_PTH)
159 runner.quiet('udevadm trigger')
160 except:
161 pass
162
163def extract_rpm(rpmfile, targetdir):
164 rpm2cpio = find_binary_path("rpm2cpio")
165 cpio = find_binary_path("cpio")
166
167 olddir = os.getcwd()
168 os.chdir(targetdir)
169
170 msger.verbose("Extract rpm file with cpio: %s" % rpmfile)
171 p1 = subprocess.Popen([rpm2cpio, rpmfile], stdout=subprocess.PIPE)
172 p2 = subprocess.Popen([cpio, "-idv"], stdin=p1.stdout,
173 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
174 (sout, serr) = p2.communicate()
175 msger.verbose(sout or serr)
176
177 os.chdir(olddir)
178
179def compressing(fpath, method):
180 comp_map = {
181 "gz": "gzip",
182 "bz2": "bzip2"
183 }
184 if method not in comp_map:
185        raise CreatorError("Unsupported compress format: %s, valid values: %s"
186 % (method, ','.join(comp_map.keys())))
187 cmd = find_binary_path(comp_map[method])
188 rc = runner.show([cmd, "-f", fpath])
189 if rc:
190 raise CreatorError("Failed to %s file: %s" % (comp_map[method], fpath))
191
192def taring(dstfile, target):
193 import tarfile
194 basen, ext = os.path.splitext(dstfile)
195 comp = {".tar": None,
196 ".gz": "gz", # for .tar.gz
197 ".bz2": "bz2", # for .tar.bz2
198 ".tgz": "gz",
199 ".tbz": "bz2"}[ext]
200
201 # specify tarball file path
202 if not comp:
203 tarpath = dstfile
204 elif basen.endswith(".tar"):
205 tarpath = basen
206 else:
207 tarpath = basen + ".tar"
208 wf = tarfile.open(tarpath, 'w')
209
210 if os.path.isdir(target):
211 for item in os.listdir(target):
212 wf.add(os.path.join(target, item), item)
213 else:
214 wf.add(target, os.path.basename(target))
215 wf.close()
216
217 if comp:
218 compressing(tarpath, comp)
219 # when dstfile ext is ".tgz" and ".tbz", should rename
220 if not basen.endswith(".tar"):
221 shutil.move("%s.%s" % (tarpath, comp), dstfile)
222
223def ziping(dstfile, target):
224 import zipfile
225 wf = zipfile.ZipFile(dstfile, 'w', compression=zipfile.ZIP_DEFLATED)
226 if os.path.isdir(target):
227 for item in os.listdir(target):
228 fpath = os.path.join(target, item)
229 if not os.path.isfile(fpath):
230 continue
231 wf.write(fpath, item, zipfile.ZIP_DEFLATED)
232 else:
233 wf.write(target, os.path.basename(target), zipfile.ZIP_DEFLATED)
234 wf.close()
235
236pack_formats = {
237 ".tar": taring,
238 ".tar.gz": taring,
239 ".tar.bz2": taring,
240 ".tgz": taring,
241 ".tbz": taring,
242 ".zip": ziping,
243}
244
245def packing(dstfile, target):
246 (base, ext) = os.path.splitext(dstfile)
247 if ext in (".gz", ".bz2") and base.endswith(".tar"):
248 ext = ".tar" + ext
249 if ext not in pack_formats:
250        raise CreatorError("Unsupported pack format: %s, valid values: %s"
251 % (ext, ','.join(pack_formats.keys())))
252 func = pack_formats[ext]
253 # func should be callable
254 func(dstfile, target)
255
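A short usage sketch of the extension-based dispatch; the paths are hypothetical:

    packing('/tmp/rootfs.tar.gz', '/tmp/rootfs')  # taring(), then gzip
    packing('/tmp/rootfs.zip', '/tmp/rootfs')     # ziping(), deflated entries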
256def human_size(size):
257 """Return human readable string for Bytes size
258 """
259
260 if size <= 0:
261 return "0M"
262 import math
263 measure = ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
264 expo = int(math.log(size, 1024))
265 mant = float(size/math.pow(1024, expo))
266 return "{0:.1f}{1:s}".format(mant, measure[expo])
267
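A few checked examples of the formula above:

    human_size(0)              # -> '0M' (special-cased)
    human_size(512)            # -> '512.0B'
    human_size(4 * 1024 ** 3)  # -> '4.0G'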
268def get_block_size(file_obj):
269 """ Returns block size for file object 'file_obj'. Errors are indicated by
270 the 'IOError' exception. """
271
272 from fcntl import ioctl
273 import struct
274
275 # Get the block size of the host file-system for the image file by calling
276 # the FIGETBSZ ioctl (number 2).
277 binary_data = ioctl(file_obj, 2, struct.pack('I', 0))
278 return struct.unpack('I', binary_data)[0]
279
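A minimal usage sketch; the image path is hypothetical:

    with open('/tmp/disk.img', 'rb') as img:
        bsize = get_block_size(img)   # e.g. 4096 on a typical ext4 host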
280def check_space_pre_cp(src, dst):
281    """Check whether there is enough disk space before 'cp'-like
282    operations; otherwise an exception is raised.
283 """
284
285 srcsize = get_file_size(src) * 1024 * 1024
286 freesize = get_filesystem_avail(dst)
287 if srcsize > freesize:
288        raise CreatorError("space on %s (%s free) is not enough for about %s of data"
289                           % (dst, human_size(freesize), human_size(srcsize)))
290
291def calc_hashes(file_path, hash_names, start = 0, end = None):
292 """ Calculate hashes for a file. The 'file_path' argument is the file
293 to calculate hash functions for, 'start' and 'end' are the starting and
294    ending file offsets to calculate the hash functions for. The 'hash_names'
295    argument is a list of hash names to calculate. Returns the list
296    of calculated hash values in hexadecimal form, in the same order
297 as 'hash_names'.
298 """
299 if end == None:
300 end = os.path.getsize(file_path)
301
302 chunk_size = 65536
303 to_read = end - start
304 read = 0
305
306 hashes = []
307 for hash_name in hash_names:
308 hashes.append(hashlib.new(hash_name))
309
310 with open(file_path, "rb") as f:
311 f.seek(start)
312
313 while read < to_read:
314 if read + chunk_size > to_read:
315 chunk_size = to_read - read
316 chunk = f.read(chunk_size)
317 for hash_obj in hashes:
318 hash_obj.update(chunk)
319 read += chunk_size
320
321 result = []
322 for hash_obj in hashes:
323 result.append(hash_obj.hexdigest())
324
325 return result
326
327def get_md5sum(fpath):
328 return calc_hashes(fpath, ('md5', ))[0]
329
330
331def normalize_ksfile(ksconf, release, arch):
332 '''
333 Return the name of a normalized ks file in which macro variables
334    @BUILD_ID@ and @ARCH@ are replaced with real values.
335
336 The original ks file is returned if no special macro is used, otherwise
337 a temp file is created and returned, which will be deleted when program
338 exits normally.
339 '''
340
341 if not release:
342 release = "latest"
343 if not arch or re.match(r'i.86', arch):
344 arch = "ia32"
345
346 with open(ksconf) as f:
347 ksc = f.read()
348
349 if "@ARCH@" not in ksc and "@BUILD_ID@" not in ksc:
350 return ksconf
351
352    msger.info("Substituting macro variables @BUILD_ID@/@ARCH@ in ks: %s" % ksconf)
353 ksc = ksc.replace("@ARCH@", arch)
354 ksc = ksc.replace("@BUILD_ID@", release)
355
356 fd, ksconf = tempfile.mkstemp(prefix=os.path.basename(ksconf))
357 os.write(fd, ksc)
358 os.close(fd)
359
360 msger.debug('normalized ks file:%s' % ksconf)
361
362 def remove_temp_ks():
363 try:
364 os.unlink(ksconf)
365 except OSError, err:
366 msger.warning('Failed to remove temp ks file:%s:%s' % (ksconf, err))
367
368 import atexit
369 atexit.register(remove_temp_ks)
370
371 return ksconf
372
373
374def _check_mic_chroot(rootdir):
375 def _path(path):
376 return rootdir.rstrip('/') + path
377
378 release_files = map(_path, [ "/etc/moblin-release",
379 "/etc/meego-release",
380 "/etc/tizen-release"])
381
382 if not any(map(os.path.exists, release_files)):
383 msger.warning("Dir %s is not a MeeGo/Tizen chroot env" % rootdir)
384
385 if not glob.glob(rootdir + "/boot/vmlinuz-*"):
386        msger.warning("Failed to find a kernel image under %s" % rootdir)
387
388 return
389
390def selinux_check(arch, fstypes):
391 try:
392 getenforce = find_binary_path('getenforce')
393 except CreatorError:
394 return
395
396 selinux_status = runner.outs([getenforce])
397 if arch and arch.startswith("arm") and selinux_status == "Enforcing":
398 raise CreatorError("Can't create arm image if selinux is enabled, "
399 "please run 'setenforce 0' to disable selinux")
400
401 use_btrfs = filter(lambda typ: typ == 'btrfs', fstypes)
402 if use_btrfs and selinux_status == "Enforcing":
403 raise CreatorError("Can't create btrfs image if selinux is enabled,"
404 " please run 'setenforce 0' to disable selinux")
405
406def get_image_type(path):
407 def _get_extension_name(path):
408 match = re.search("(?<=\.)\w+$", path)
409 if match:
410 return match.group(0)
411 else:
412 return None
413
414 if os.path.isdir(path):
415 _check_mic_chroot(path)
416 return "fs"
417
418 maptab = {
419 "tar": "loop",
420 "raw":"raw",
421 "vmdk":"vmdk",
422 "vdi":"vdi",
423 "iso":"livecd",
424 "usbimg":"liveusb",
425 }
426
427 extension = _get_extension_name(path)
428 if extension in maptab:
429 return maptab[extension]
430
431 fd = open(path, "rb")
432 file_header = fd.read(1024)
433 fd.close()
434 vdi_flag = "<<< Sun VirtualBox Disk Image >>>"
435 if file_header[0:len(vdi_flag)] == vdi_flag:
436 return maptab["vdi"]
437
438 output = runner.outs(['file', path])
439 isoptn = re.compile(r".*ISO 9660 CD-ROM filesystem.*(bootable).*")
440 usbimgptn = re.compile(r".*x86 boot sector.*active.*")
441 rawptn = re.compile(r".*x86 boot sector.*")
442 vmdkptn = re.compile(r".*VMware. disk image.*")
443 ext3fsimgptn = re.compile(r".*Linux.*ext3 filesystem data.*")
444 ext4fsimgptn = re.compile(r".*Linux.*ext4 filesystem data.*")
445 btrfsimgptn = re.compile(r".*BTRFS.*")
446 if isoptn.match(output):
447 return maptab["iso"]
448 elif usbimgptn.match(output):
449 return maptab["usbimg"]
450 elif rawptn.match(output):
451 return maptab["raw"]
452 elif vmdkptn.match(output):
453 return maptab["vmdk"]
454 elif ext3fsimgptn.match(output):
455 return "ext3fsimg"
456 elif ext4fsimgptn.match(output):
457 return "ext4fsimg"
458 elif btrfsimgptn.match(output):
459 return "btrfsimg"
460 else:
461 raise CreatorError("Cannot detect the type of image: %s" % path)
462
463
464def get_file_size(filename):
465 """ Return size in MB unit """
466 cmd = ['du', "-s", "-b", "-B", "1M", filename]
467 rc, duOutput = runner.runtool(cmd)
468 if rc != 0:
469 raise CreatorError("Failed to run: %s" % ' '.join(cmd))
470 size1 = int(duOutput.split()[0])
471
472 cmd = ['du', "-s", "-B", "1M", filename]
473 rc, duOutput = runner.runtool(cmd)
474 if rc != 0:
475 raise CreatorError("Failed to run: %s" % ' '.join(cmd))
476
477 size2 = int(duOutput.split()[0])
478 return max(size1, size2)
479
480
481def get_filesystem_avail(fs):
482 vfstat = os.statvfs(fs)
483 return vfstat.f_bavail * vfstat.f_bsize
484
485def convert_image(srcimg, srcfmt, dstimg, dstfmt):
486 #convert disk format
487 if dstfmt != "raw":
488 raise CreatorError("Invalid destination image format: %s" % dstfmt)
489 msger.debug("converting %s image to %s" % (srcimg, dstimg))
490 if srcfmt == "vmdk":
491 path = find_binary_path("qemu-img")
492 argv = [path, "convert", "-f", "vmdk", srcimg, "-O", dstfmt, dstimg]
493 elif srcfmt == "vdi":
494 path = find_binary_path("VBoxManage")
495 argv = [path, "internalcommands", "converttoraw", srcimg, dstimg]
496 else:
497        raise CreatorError("Invalid source image format: %s" % srcfmt)
498
499 rc = runner.show(argv)
500 if rc == 0:
501 msger.debug("convert successful")
502 if rc != 0:
503 raise CreatorError("Unable to convert disk to %s" % dstfmt)
504
505def uncompress_squashfs(squashfsimg, outdir):
506    """Uncompress a file system from a squashfs image"""
507 unsquashfs = find_binary_path("unsquashfs")
508 args = [ unsquashfs, "-d", outdir, squashfsimg ]
509 rc = runner.show(args)
510 if (rc != 0):
511 raise SquashfsError("Failed to uncompress %s." % squashfsimg)
512
513def mkdtemp(dir = "/var/tmp", prefix = "wic-tmp-"):
514 """ FIXME: use the dir in wic.conf instead """
515
516 makedirs(dir)
517 return tempfile.mkdtemp(dir = dir, prefix = prefix)
518
519def get_repostrs_from_ks(ks):
520 def _get_temp_reponame(baseurl):
521 md5obj = hashlib.md5(baseurl)
522 tmpreponame = "%s" % md5obj.hexdigest()
523 return tmpreponame
524
525 kickstart_repos = []
526
527 for repodata in ks.handler.repo.repoList:
528 repo = {}
529 for attr in ('name',
530 'baseurl',
531 'mirrorlist',
532 'includepkgs', # val is list
533 'excludepkgs', # val is list
534 'cost', # int
535 'priority',# int
536 'save',
537 'proxy',
538 'proxyuser',
539                     'proxypasswd',
541 'debuginfo',
542 'source',
543 'gpgkey',
544 'ssl_verify'):
545 if hasattr(repodata, attr) and getattr(repodata, attr):
546 repo[attr] = getattr(repodata, attr)
547
548 if 'name' not in repo:
549 repo['name'] = _get_temp_reponame(repodata.baseurl)
550
551 kickstart_repos.append(repo)
552
553 return kickstart_repos
554
555def _get_uncompressed_data_from_url(url, filename, proxies):
556 filename = myurlgrab(url, filename, proxies)
557 suffix = None
558 if filename.endswith(".gz"):
559 suffix = ".gz"
560 runner.quiet(['gunzip', "-f", filename])
561 elif filename.endswith(".bz2"):
562 suffix = ".bz2"
563 runner.quiet(['bunzip2', "-f", filename])
564 if suffix:
565 filename = filename.replace(suffix, "")
566 return filename
567
568def _get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename,
569 sumtype=None, checksum=None):
570 url = os.path.join(baseurl, filename)
571 filename_tmp = str("%s/%s/%s" % (cachedir, reponame, os.path.basename(filename)))
572 if os.path.splitext(filename_tmp)[1] in (".gz", ".bz2"):
573 filename = os.path.splitext(filename_tmp)[0]
574 else:
575 filename = filename_tmp
576 if sumtype and checksum and os.path.exists(filename):
577 try:
578 sumcmd = find_binary_path("%ssum" % sumtype)
579 except:
580 file_checksum = None
581 else:
582 file_checksum = runner.outs([sumcmd, filename]).split()[0]
583
584 if file_checksum and file_checksum == checksum:
585 return filename
586
587    return _get_uncompressed_data_from_url(url, filename_tmp, proxies)
588
589def get_metadata_from_repos(repos, cachedir):
590 my_repo_metadata = []
591 for repo in repos:
592 reponame = repo['name']
593 baseurl = repo['baseurl']
594
595
596 if 'proxy' in repo:
597 proxy = repo['proxy']
598 else:
599 proxy = get_proxy_for(baseurl)
600
601 proxies = None
602 if proxy:
603 proxies = {str(baseurl.split(":")[0]):str(proxy)}
604
605 makedirs(os.path.join(cachedir, reponame))
606 url = os.path.join(baseurl, "repodata/repomd.xml")
607 filename = os.path.join(cachedir, reponame, 'repomd.xml')
608 repomd = myurlgrab(url, filename, proxies)
609 try:
610 root = xmlparse(repomd)
611 except SyntaxError:
612 raise CreatorError("repomd.xml syntax error.")
613
614 ns = root.getroot().tag
615 ns = ns[0:ns.rindex("}")+1]
616
617 filepaths = {}
618 checksums = {}
619 sumtypes = {}
620
621 for elm in root.getiterator("%sdata" % ns):
622 if elm.attrib["type"] == "patterns":
623 filepaths['patterns'] = elm.find("%slocation" % ns).attrib['href']
624 checksums['patterns'] = elm.find("%sopen-checksum" % ns).text
625 sumtypes['patterns'] = elm.find("%sopen-checksum" % ns).attrib['type']
626 break
627
628 for elm in root.getiterator("%sdata" % ns):
629 if elm.attrib["type"] in ("group_gz", "group"):
630 filepaths['comps'] = elm.find("%slocation" % ns).attrib['href']
631 checksums['comps'] = elm.find("%sopen-checksum" % ns).text
632 sumtypes['comps'] = elm.find("%sopen-checksum" % ns).attrib['type']
633 break
634
635 primary_type = None
636 for elm in root.getiterator("%sdata" % ns):
637 if elm.attrib["type"] in ("primary_db", "primary"):
638 primary_type = elm.attrib["type"]
639 filepaths['primary'] = elm.find("%slocation" % ns).attrib['href']
640 checksums['primary'] = elm.find("%sopen-checksum" % ns).text
641 sumtypes['primary'] = elm.find("%sopen-checksum" % ns).attrib['type']
642 break
643
644 if not primary_type:
645 continue
646
647 for item in ("primary", "patterns", "comps"):
648 if item not in filepaths:
649 filepaths[item] = None
650 continue
651 if not filepaths[item]:
652 continue
653 filepaths[item] = _get_metadata_from_repo(baseurl,
654 proxies,
655 cachedir,
656 reponame,
657 filepaths[item],
658 sumtypes[item],
659 checksums[item])
660
661        # Get the repo key
662 try:
663 repokey = _get_metadata_from_repo(baseurl,
664 proxies,
665 cachedir,
666 reponame,
667 "repodata/repomd.xml.key")
668 except CreatorError:
669 repokey = None
670 msger.debug("\ncan't get %s/%s" % (baseurl, "repodata/repomd.xml.key"))
671
672 my_repo_metadata.append({"name":reponame,
673 "baseurl":baseurl,
674 "repomd":repomd,
675 "primary":filepaths['primary'],
676 "cachedir":cachedir,
677 "proxies":proxies,
678 "patterns":filepaths['patterns'],
679 "comps":filepaths['comps'],
680 "repokey":repokey})
681
682 return my_repo_metadata
683
684def get_rpmver_in_repo(repometadata):
685 for repo in repometadata:
686 if repo["primary"].endswith(".xml"):
687 root = xmlparse(repo["primary"])
688 ns = root.getroot().tag
689 ns = ns[0:ns.rindex("}")+1]
690
691 versionlist = []
692 for elm in root.getiterator("%spackage" % ns):
693 if elm.find("%sname" % ns).text == 'rpm':
694 for node in elm.getchildren():
695 if node.tag == "%sversion" % ns:
696 versionlist.append(node.attrib['ver'])
697
698 if versionlist:
699 return reversed(
700 sorted(
701 versionlist,
702 key = lambda ver: map(int, ver.split('.')))).next()
703
704 elif repo["primary"].endswith(".sqlite"):
705 con = sqlite.connect(repo["primary"])
706 for row in con.execute("select version from packages where "
707 "name=\"rpm\" ORDER by version DESC"):
708 con.close()
709 return row[0]
710
711 return None
712
713def get_arch(repometadata):
714 archlist = []
715 for repo in repometadata:
716 if repo["primary"].endswith(".xml"):
717 root = xmlparse(repo["primary"])
718 ns = root.getroot().tag
719 ns = ns[0:ns.rindex("}")+1]
720 for elm in root.getiterator("%spackage" % ns):
721 if elm.find("%sarch" % ns).text not in ("noarch", "src"):
722 arch = elm.find("%sarch" % ns).text
723 if arch not in archlist:
724 archlist.append(arch)
725 elif repo["primary"].endswith(".sqlite"):
726 con = sqlite.connect(repo["primary"])
727 for row in con.execute("select arch from packages where arch not in (\"src\", \"noarch\")"):
728 if row[0] not in archlist:
729 archlist.append(row[0])
730
731 con.close()
732
733 uniq_arch = []
734 for i in range(len(archlist)):
735 if archlist[i] not in rpmmisc.archPolicies.keys():
736 continue
737 need_append = True
738 j = 0
739 while j < len(uniq_arch):
740 if archlist[i] in rpmmisc.archPolicies[uniq_arch[j]].split(':'):
741 need_append = False
742 break
743 if uniq_arch[j] in rpmmisc.archPolicies[archlist[i]].split(':'):
744 if need_append:
745 uniq_arch[j] = archlist[i]
746 need_append = False
747 else:
748 uniq_arch.remove(uniq_arch[j])
749 continue
750 j += 1
751 if need_append:
752 uniq_arch.append(archlist[i])
753
754 return uniq_arch, archlist
755
756def get_package(pkg, repometadata, arch = None):
757 ver = ""
758 target_repo = None
759 if not arch:
760 arches = []
761 elif arch not in rpmmisc.archPolicies:
762 arches = [arch]
763 else:
764 arches = rpmmisc.archPolicies[arch].split(':')
765 arches.append('noarch')
766
767 for repo in repometadata:
768 if repo["primary"].endswith(".xml"):
769 root = xmlparse(repo["primary"])
770 ns = root.getroot().tag
771 ns = ns[0:ns.rindex("}")+1]
772 for elm in root.getiterator("%spackage" % ns):
773 if elm.find("%sname" % ns).text == pkg:
774 if elm.find("%sarch" % ns).text in arches:
775 version = elm.find("%sversion" % ns)
776 tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
777 if tmpver > ver:
778 ver = tmpver
779 location = elm.find("%slocation" % ns)
780 pkgpath = "%s" % location.attrib['href']
781 target_repo = repo
782 break
783 if repo["primary"].endswith(".sqlite"):
784 con = sqlite.connect(repo["primary"])
785 if arch:
786 sql = 'select version, release, location_href from packages ' \
787 'where name = "%s" and arch IN ("%s")' % \
788 (pkg, '","'.join(arches))
789 for row in con.execute(sql):
790 tmpver = "%s-%s" % (row[0], row[1])
791 if tmpver > ver:
792 ver = tmpver
793 pkgpath = "%s" % row[2]
794 target_repo = repo
795 break
796 else:
797 sql = 'select version, release, location_href from packages ' \
798 'where name = "%s"' % pkg
799 for row in con.execute(sql):
800 tmpver = "%s-%s" % (row[0], row[1])
801 if tmpver > ver:
802 ver = tmpver
803 pkgpath = "%s" % row[2]
804 target_repo = repo
805 break
806 con.close()
807 if target_repo:
808 makedirs("%s/packages/%s" % (target_repo["cachedir"], target_repo["name"]))
809 url = os.path.join(target_repo["baseurl"], pkgpath)
810 filename = str("%s/packages/%s/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath)))
811 if os.path.exists(filename):
812 ret = rpmmisc.checkRpmIntegrity('rpm', filename)
813 if ret == 0:
814 return filename
815
816 msger.warning("package %s is damaged: %s" %
817 (os.path.basename(filename), filename))
818 os.unlink(filename)
819
820 pkg = myurlgrab(str(url), filename, target_repo["proxies"])
821 return pkg
822 else:
823 return None
824
825def get_source_name(pkg, repometadata):
826
827 def get_bin_name(pkg):
828 m = RPM_RE.match(pkg)
829 if m:
830 return m.group(1)
831 return None
832
833 def get_src_name(srpm):
834 m = SRPM_RE.match(srpm)
835 if m:
836 return m.group(1)
837 return None
838
839 ver = ""
840 target_repo = None
841
842 pkg_name = get_bin_name(pkg)
843 if not pkg_name:
844 return None
845
846 for repo in repometadata:
847 if repo["primary"].endswith(".xml"):
848 root = xmlparse(repo["primary"])
849 ns = root.getroot().tag
850 ns = ns[0:ns.rindex("}")+1]
851 for elm in root.getiterator("%spackage" % ns):
852 if elm.find("%sname" % ns).text == pkg_name:
853 if elm.find("%sarch" % ns).text != "src":
854 version = elm.find("%sversion" % ns)
855 tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
856 if tmpver > ver:
857 ver = tmpver
858 fmt = elm.find("%sformat" % ns)
859 if fmt:
860 fns = fmt.getchildren()[0].tag
861 fns = fns[0:fns.rindex("}")+1]
862 pkgpath = fmt.find("%ssourcerpm" % fns).text
863 target_repo = repo
864 break
865
866 if repo["primary"].endswith(".sqlite"):
867 con = sqlite.connect(repo["primary"])
868 for row in con.execute("select version, release, rpm_sourcerpm from packages where name = \"%s\" and arch != \"src\"" % pkg_name):
869 tmpver = "%s-%s" % (row[0], row[1])
870 if tmpver > ver:
871 pkgpath = "%s" % row[2]
872 target_repo = repo
873 break
874 con.close()
875 if target_repo:
876 return get_src_name(pkgpath)
877 else:
878 return None
879
880def get_pkglist_in_patterns(group, patterns):
881 found = False
882 pkglist = []
883 try:
884 root = xmlparse(patterns)
885 except SyntaxError:
886 raise SyntaxError("%s syntax error." % patterns)
887
888 for elm in list(root.getroot()):
889 ns = elm.tag
890 ns = ns[0:ns.rindex("}")+1]
891 name = elm.find("%sname" % ns)
892 summary = elm.find("%ssummary" % ns)
893 if name.text == group or summary.text == group:
894 found = True
895 break
896
897 if not found:
898 return pkglist
899
900 found = False
901 for requires in list(elm):
902 if requires.tag.endswith("requires"):
903 found = True
904 break
905
906 if not found:
907 return pkglist
908
909 for pkg in list(requires):
910 pkgname = pkg.attrib["name"]
911 if pkgname not in pkglist:
912 pkglist.append(pkgname)
913
914 return pkglist
915
916def get_pkglist_in_comps(group, comps):
917 found = False
918 pkglist = []
919 try:
920 root = xmlparse(comps)
921 except SyntaxError:
922 raise SyntaxError("%s syntax error." % comps)
923
924 for elm in root.getiterator("group"):
925 id = elm.find("id")
926 name = elm.find("name")
927 if id.text == group or name.text == group:
928 packagelist = elm.find("packagelist")
929 found = True
930 break
931
932 if not found:
933 return pkglist
934
935 for require in elm.getiterator("packagereq"):
936 if require.tag.endswith("packagereq"):
937 pkgname = require.text
938 if pkgname not in pkglist:
939 pkglist.append(pkgname)
940
941 return pkglist
942
943def is_statically_linked(binary):
944 return ", statically linked, " in runner.outs(['file', binary])
945
946def setup_qemu_emulator(rootdir, arch):
947 # mount binfmt_misc if it doesn't exist
948 if not os.path.exists("/proc/sys/fs/binfmt_misc"):
949 modprobecmd = find_binary_path("modprobe")
950 runner.show([modprobecmd, "binfmt_misc"])
951 if not os.path.exists("/proc/sys/fs/binfmt_misc/register"):
952 mountcmd = find_binary_path("mount")
953 runner.show([mountcmd, "-t", "binfmt_misc", "none", "/proc/sys/fs/binfmt_misc"])
954
955 # qemu_emulator is a special case, we can't use find_binary_path
956 # qemu emulator should be a statically-linked executable file
957 qemu_emulator = "/usr/bin/qemu-arm"
958 if not os.path.exists(qemu_emulator) or not is_statically_linked(qemu_emulator):
959 qemu_emulator = "/usr/bin/qemu-arm-static"
960 if not os.path.exists(qemu_emulator):
961 raise CreatorError("Please install a statically-linked qemu-arm")
962
963 # qemu emulator version check
964    armv7_list = [a for a in rpmmisc.archPolicies.keys() if a.startswith('armv7')]
965 if arch in armv7_list: # need qemu (>=0.13.0)
966 qemuout = runner.outs([qemu_emulator, "-h"])
967 m = re.search("version\s*([.\d]+)", qemuout)
968 if m:
969 qemu_version = m.group(1)
970 if qemu_version < "0.13":
971 raise CreatorError("Requires %s version >=0.13 for %s" % (qemu_emulator, arch))
972 else:
973 msger.warning("Can't get version info of %s, please make sure it's higher than 0.13.0" % qemu_emulator)
974
975 if not os.path.exists(rootdir + "/usr/bin"):
976 makedirs(rootdir + "/usr/bin")
977 shutil.copy(qemu_emulator, rootdir + "/usr/bin/qemu-arm-static")
978 qemu_emulator = "/usr/bin/qemu-arm-static"
979
980    # disable selinux, which would block the qemu emulator from running
981 if os.path.exists("/usr/sbin/setenforce"):
982 msger.info('Try to disable selinux')
983 runner.show(["/usr/sbin/setenforce", "0"])
984
985    # unregister the arm handler if one has already been registered
986 node = "/proc/sys/fs/binfmt_misc/arm"
987 if os.path.exists(node):
988 qemu_unregister_string = "-1\n"
989 fd = open("/proc/sys/fs/binfmt_misc/arm", "w")
990 fd.write(qemu_unregister_string)
991 fd.close()
992
993 # register qemu emulator for interpreting other arch executable file
994 if not os.path.exists(node):
995 qemu_arm_string = ":arm:M::\\x7fELF\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfa\\xff\\xff\\xff:%s:\n" % qemu_emulator
996 fd = open("/proc/sys/fs/binfmt_misc/register", "w")
997 fd.write(qemu_arm_string)
998 fd.close()
999
1000 return qemu_emulator
1001
1002def SrcpkgsDownload(pkgs, repometadata, instroot, cachedir):
1003 def get_source_repometadata(repometadata):
1004 src_repometadata=[]
1005 for repo in repometadata:
1006 if repo["name"].endswith("-source"):
1007 src_repometadata.append(repo)
1008 if src_repometadata:
1009 return src_repometadata
1010 return None
1011
1012 def get_src_name(srpm):
1013 m = SRPM_RE.match(srpm)
1014 if m:
1015 return m.group(1)
1016 return None
1017
1018 src_repometadata = get_source_repometadata(repometadata)
1019
1020 if not src_repometadata:
1021 msger.warning("No source repo found")
1022 return None
1023
1024 src_pkgs = []
1025 lpkgs_dict = {}
1026 lpkgs_path = []
1027 for repo in src_repometadata:
1028 cachepath = "%s/%s/packages/*.src.rpm" %(cachedir, repo["name"])
1029 lpkgs_path += glob.glob(cachepath)
1030
1031 for lpkg in lpkgs_path:
1032 lpkg_name = get_src_name(os.path.basename(lpkg))
1033 lpkgs_dict[lpkg_name] = lpkg
1034 localpkgs = lpkgs_dict.keys()
1035
1036 cached_count = 0
1037 destdir = instroot+'/usr/src/SRPMS'
1038 if not os.path.exists(destdir):
1039 os.makedirs(destdir)
1040
1041 srcpkgset = set()
1042 for _pkg in pkgs:
1043 srcpkg_name = get_source_name(_pkg, repometadata)
1044 if not srcpkg_name:
1045 continue
1046 srcpkgset.add(srcpkg_name)
1047
1048 for pkg in list(srcpkgset):
1049 if pkg in localpkgs:
1050 cached_count += 1
1051 shutil.copy(lpkgs_dict[pkg], destdir)
1052 src_pkgs.append(os.path.basename(lpkgs_dict[pkg]))
1053 else:
1054 src_pkg = get_package(pkg, src_repometadata, 'src')
1055 if src_pkg:
1056 shutil.copy(src_pkg, destdir)
1057 src_pkgs.append(src_pkg)
1058    msger.info("%d source packages retrieved from cache" % cached_count)
1059
1060 return src_pkgs
1061
1062def strip_end(text, suffix):
1063 if not text.endswith(suffix):
1064 return text
1065 return text[:-len(suffix)]
diff --git a/scripts/lib/mic/utils/oe/__init__.py b/scripts/lib/mic/utils/oe/__init__.py
new file mode 100644
index 0000000000..d10e802116
--- /dev/null
+++ b/scripts/lib/mic/utils/oe/__init__.py
@@ -0,0 +1,22 @@
1#
2# OpenEmbedded mic utils library
3#
4# Copyright (c) 2013, Intel Corporation.
5# All rights reserved.
6#
7# This program is free software; you can redistribute it and/or modify
8# it under the terms of the GNU General Public License version 2 as
9# published by the Free Software Foundation.
10#
11# This program is distributed in the hope that it will be useful,
12# but WITHOUT ANY WARRANTY; without even the implied warranty of
13# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14# GNU General Public License for more details.
15#
16# You should have received a copy of the GNU General Public License along
17# with this program; if not, write to the Free Software Foundation, Inc.,
18# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19#
20# AUTHORS
21# Tom Zanussi <tom.zanussi (at] linux.intel.com>
22#
diff --git a/scripts/lib/mic/utils/oe/misc.py b/scripts/lib/mic/utils/oe/misc.py
new file mode 100644
index 0000000000..7ad3aa9685
--- /dev/null
+++ b/scripts/lib/mic/utils/oe/misc.py
@@ -0,0 +1,144 @@
1# ex:ts=4:sw=4:sts=4:et
2# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
3#
4# Copyright (c) 2013, Intel Corporation.
5# All rights reserved.
6#
7# This program is free software; you can redistribute it and/or modify
8# it under the terms of the GNU General Public License version 2 as
9# published by the Free Software Foundation.
10#
11# This program is distributed in the hope that it will be useful,
12# but WITHOUT ANY WARRANTY; without even the implied warranty of
13# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14# GNU General Public License for more details.
15#
16# You should have received a copy of the GNU General Public License along
17# with this program; if not, write to the Free Software Foundation, Inc.,
18# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19#
20# DESCRIPTION
21# This module provides a place to collect various mic-related utils
22# for the OpenEmbedded Image Tools.
23#
24# AUTHORS
25# Tom Zanussi <tom.zanussi (at] linux.intel.com>
26#
27
28from mic import msger
29from mic.utils import runner
30
31def exec_cmd(cmd_and_args, as_shell = False, catch = 3):
32 """
33 Execute command, catching stderr, stdout
34
35 Need to execute as_shell if the command uses wildcards
36 """
37 msger.debug("exec_cmd: %s" % cmd_and_args)
38 args = cmd_and_args.split()
39 msger.debug(args)
40
41 if (as_shell):
42 rc, out = runner.runtool(cmd_and_args, catch)
43 else:
44 rc, out = runner.runtool(args, catch)
45 out = out.strip()
46 msger.debug("exec_cmd: output for %s (rc = %d): %s" % \
47 (cmd_and_args, rc, out))
48
49 if rc != 0:
50 # We don't throw exception when return code is not 0, because
51 # parted always fails to reload part table with loop devices. This
52 # prevents us from distinguishing real errors based on return
53 # code.
54 msger.warning("WARNING: %s returned '%s' instead of 0" % (cmd_and_args, rc))
55
56 return (rc, out)
57
58
59def exec_cmd_quiet(cmd_and_args, as_shell = False):
60 """
61 Execute command, catching nothing in the output
62
63 Need to execute as_shell if the command uses wildcards
64 """
65 return exec_cmd(cmd_and_args, as_shell, 0)
66
67
68def exec_native_cmd(cmd_and_args, native_sysroot, catch = 3):
69 """
70 Execute native command, catching stderr, stdout
71
72 Need to execute as_shell if the command uses wildcards
73
74 Always need to execute native commands as_shell
75 """
76 native_paths = \
77 "export PATH=%s/sbin:%s/usr/sbin:%s/usr/bin:$PATH" % \
78 (native_sysroot, native_sysroot, native_sysroot)
79 native_cmd_and_args = "%s;%s" % (native_paths, cmd_and_args)
80 msger.debug("exec_native_cmd: %s" % cmd_and_args)
81
82 args = cmd_and_args.split()
83 msger.debug(args)
84
85 rc, out = exec_cmd(native_cmd_and_args, True, catch)
86
87 if rc == 127: # shell command-not-found
88 msger.error("A native (host) program required to build the image "
89 "was not found (see details above). Please make sure "
90 "it's installed and try again.")
91
92 return (rc, out)
93
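A minimal usage sketch for exec_native_cmd(); the sysroot path is hypothetical. The command runs through a shell with the sysroot's sbin/usr/sbin/usr/bin prepended to PATH, and a 127 return code aborts via msger.error():

    rc, out = exec_native_cmd("parted -s /tmp/disk.img print",
                              "/build/tmp/sysroot-native")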
94
95def exec_native_cmd_quiet(cmd_and_args, native_sysroot):
96 """
97 Execute native command, catching nothing in the output
98
99 Need to execute as_shell if the command uses wildcards
100
101 Always need to execute native commands as_shell
102 """
103 return exec_native_cmd(cmd_and_args, native_sysroot, 0)
104
105
106# kickstart doesn't support variable substitution in commands, so this
107# is our current simplistic scheme for supporting that
108
109wks_vars = dict()
110
111def get_wks_var(key):
112 return wks_vars[key]
113
114def add_wks_var(key, val):
115 wks_vars[key] = val
116
117BOOTDD_EXTRA_SPACE = 16384
118IMAGE_EXTRA_SPACE = 10240
119
120__bitbake_env_lines = ""
121
122def set_bitbake_env_lines(bitbake_env_lines):
123 global __bitbake_env_lines
124 __bitbake_env_lines = bitbake_env_lines
125
126def get_bitbake_env_lines():
127 return __bitbake_env_lines
128
129def get_line_val(line, key):
130 """
131 Extract the value from the VAR="val" string
132 """
133 if line.startswith(key + "="):
134        stripped_line = line.split('=', 1)[1]
135 stripped_line = stripped_line.replace('\"', '')
136 return stripped_line
137 return None
138
139def get_bitbake_var(key):
140 for line in __bitbake_env_lines.split('\n'):
141 if (get_line_val(line, key)):
142 val = get_line_val(line, key)
143 return val
144 return None
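A minimal sketch of the variable lookup; the captured 'bitbake -e' lines are hypothetical:

    set_bitbake_env_lines('IMAGE_ROOTFS="/build/rootfs"\nMACHINE="qemux86"')
    get_bitbake_var("IMAGE_ROOTFS")   # -> '/build/rootfs'
    get_bitbake_var("MISSING_VAR")    # -> None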
diff --git a/scripts/lib/mic/utils/oe/package_manager.py b/scripts/lib/mic/utils/oe/package_manager.py
new file mode 100644
index 0000000000..92ce98e2ce
--- /dev/null
+++ b/scripts/lib/mic/utils/oe/package_manager.py
@@ -0,0 +1,810 @@
1# ex:ts=4:sw=4:sts=4:et
2# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
3#
4# Copyright (c) 2014, Enea AB.
5# All rights reserved.
6#
7# This program is free software; you can redistribute it and/or modify
8# it under the terms of the GNU General Public License version 2 as
9# published by the Free Software Foundation.
10#
11# This program is distributed in the hope that it will be useful,
12# but WITHOUT ANY WARRANTY; without even the implied warranty of
13# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14# GNU General Public License for more details.
15#
16# You should have received a copy of the GNU General Public License along
17# with this program; if not, write to the Free Software Foundation, Inc.,
18# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19#
20# DESCRIPTION
21# This implements the opkg package manager wrapper as a combination of
22#  meta/lib/oe/package_manager.py and bitbake/lib/bb/utils.py, with those
23#  files adapted to 'wic'.
24#
25# AUTHORS
26# Adrian Calianu <adrian.calianu (at] enea.com>
27#
28# This file incorporates work covered by the following copyright and
29# permission notice:
30#
31# meta/COPYING.GPLv2 (GPLv2)
32# meta/COPYING.MIT (MIT)
33#
34# Copyright (C) 2004 Michael Lauer
35#
36# Permission to use, copy, modify, and/or distribute this software
37# for any purpose with or without fee is hereby granted, provided
38# that the above copyright notice and this permission notice appear
39# in all copies.
40#
41# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
42# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
43# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
44# AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
45# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
46# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
47# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
48# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49
50
51from abc import ABCMeta, abstractmethod
52import os
53import glob
54import subprocess
55import shutil
56import multiprocessing
57import re
58import errno
59import fcntl
60
61from mic.utils.oe.misc import *
62from mic import msger
63
64def mkdirhier(directory):
65    """Create a directory like 'mkdir -p'; unlike os.makedirs, it does not
66    complain if the directory already exists
67 """
68
69 try:
70 os.makedirs(directory)
71 except OSError as e:
72 if e.errno != errno.EEXIST:
73            raise
74
75def remove(path, recurse=False):
76 """Equivalent to rm -f or rm -rf"""
77 if not path:
78 return
79 if recurse:
80 # shutil.rmtree(name) would be ideal but its too slow
81 subprocess.call(['rm', '-rf'] + glob.glob(path))
82 return
83 for name in glob.glob(path):
84 try:
85 os.unlink(name)
86 except OSError as exc:
87 if exc.errno != errno.ENOENT:
88 raise
89
90def lockfile(name, shared=False, retry=True):
91 """
92    Use the file 'name' as a lock file, return when the lock has been acquired.
93 Returns a variable to pass to unlockfile().
94 """
95 dirname = os.path.dirname(name)
96 mkdirhier(dirname)
97
98 if not os.access(dirname, os.W_OK):
99        # msger.error() prints the message and aborts
100        msger.error("Unable to acquire lock '%s', directory is not writable"
101                    % name)
102
103 op = fcntl.LOCK_EX
104 if shared:
105 op = fcntl.LOCK_SH
106 if not retry:
107 op = op | fcntl.LOCK_NB
108
109 while True:
110 # If we leave the lockfiles lying around there is no problem
111 # but we should clean up after ourselves. This gives potential
112 # for races though. To work around this, when we acquire the lock
113            # we check that the file we locked is still the lock file on disk,
114 # by comparing inode numbers. If they don't match or the lockfile
115 # no longer exists, we start again.
116
117 # This implementation is unfair since the last person to request the
118 # lock is the most likely to win it.
119
120 try:
121 lf = open(name, 'a+')
122 fileno = lf.fileno()
123 fcntl.flock(fileno, op)
124 statinfo = os.fstat(fileno)
125 if os.path.exists(lf.name):
126 statinfo2 = os.stat(lf.name)
127 if statinfo.st_ino == statinfo2.st_ino:
128 return lf
129 lf.close()
130 except Exception:
131 try:
132 lf.close()
133 except Exception:
134 pass
135 pass
136 if not retry:
137 return None
138
139def unlockfile(lf):
140 """
141 Unlock a file locked using lockfile()
142 """
143 try:
144 # If we had a shared lock, we need to promote to exclusive before
145 # removing the lockfile. Attempt this, ignore failures.
146 fcntl.flock(lf.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
147 os.unlink(lf.name)
148 except (IOError, OSError):
149 pass
150 fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
151 lf.close()
152
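A minimal usage sketch; the lock path is hypothetical. lockfile() blocks until the exclusive lock is acquired (pass retry=False for a non-blocking attempt that may return None):

    lf = lockfile('/var/tmp/deploy.lock')
    try:
        pass   # critical section: touch the shared deploy directory
    finally:
        unlockfile(lf)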
153def which(path, item, direction = 0, history = False):
154 """
155 Locate a file in a PATH
156 """
157
158 hist = []
159 paths = (path or "").split(':')
160 if direction != 0:
161 paths.reverse()
162
163 for p in paths:
164 next = os.path.join(p, item)
165 hist.append(next)
166 if os.path.exists(next):
167 if not os.path.isabs(next):
168 next = os.path.abspath(next)
169 if history:
170 return next, hist
171 return next
172
173 if history:
174 return "", hist
175 return ""
176
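Two hedged examples; the PATH strings are illustrative:

    which(os.getenv('PATH'), 'opkg-cl')
    # -> absolute path of 'opkg-cl', or '' if not found
    path, tried = which('/usr/sbin:/usr/bin', 'mkfs.ext4', history=True)
    # 'tried' lists every candidate path that was checked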
177
178
179# this can be used by all PM backends to create the index files in parallel
180def wic_create_index(arg):
181 index_cmd = arg
182
183 try:
184 msger.info("Executing '%s' ..." % index_cmd)
185 subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
186 except subprocess.CalledProcessError as e:
187 return("Index creation command '%s' failed with return code %d:\n%s" %
188 (e.cmd, e.returncode, e.output))
189
190 return None
191
192
193class WicIndexer(object):
194 __metaclass__ = ABCMeta
195
196 def __init__(self, d, deploy_dir):
197 self.d = d
198 self.deploy_dir = deploy_dir
199
200 @abstractmethod
201 def write_index(self):
202 pass
203
204class WicOpkgIndexer(WicIndexer):
205 def write_index(self):
206 arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
207 "SDK_PACKAGE_ARCHS",
208 "MULTILIB_ARCHS"]
209
210 opkg_index_cmd = which(os.getenv('PATH'), "opkg-make-index")
211
212 if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
213 open(os.path.join(self.deploy_dir, "Packages"), "w").close()
214
215 index_cmds = []
216 for arch_var in arch_vars:
217 if self.d.has_key(arch_var):
218 archs = self.d[arch_var]
219 else:
220 archs = None
221
222 if archs is None:
223 continue
224
225 for arch in archs.split():
226 pkgs_dir = os.path.join(self.deploy_dir, arch)
227 pkgs_file = os.path.join(pkgs_dir, "Packages")
228
229 if not os.path.isdir(pkgs_dir):
230 continue
231
232 if not os.path.exists(pkgs_file):
233 open(pkgs_file, "w").close()
234
235 index_cmds.append('%s -r %s -p %s -m %s' %
236 (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
237
238 if len(index_cmds) == 0:
239 msger.info("There are no packages in %s!" % self.deploy_dir)
240 return
241
242 nproc = multiprocessing.cpu_count()
243 pool = multiprocessing.Pool(nproc)
244 results = list(pool.imap(wic_create_index, index_cmds))
245 pool.close()
246 pool.join()
247
248 for result in results:
249 if result is not None:
250 return(result)
251
252class WicPkgsList(object):
253 __metaclass__ = ABCMeta
254
255 def __init__(self, d, rootfs_dir):
256 self.d = d
257 self.rootfs_dir = rootfs_dir
258
259 @abstractmethod
260 def list(self, format=None):
261 pass
262
263
264class WicOpkgPkgsList(WicPkgsList):
265 def __init__(self, d, rootfs_dir, config_file):
266 super(WicOpkgPkgsList, self).__init__(d, rootfs_dir)
267
268 self.opkg_cmd = which(os.getenv('PATH'), "opkg-cl")
269 self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
270 if self.d.has_key("OPKG_ARGS"):
271 self.opkg_args += self.d["OPKG_ARGS"]
272
273 def list(self, format=None):
274 opkg_query_cmd = which(os.getenv('PATH'), "opkg-query-helper.py")
275
276 if format == "arch":
277 cmd = "%s %s status | %s -a" % \
278 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
279 elif format == "file":
280 cmd = "%s %s status | %s -f" % \
281 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
282 elif format == "ver":
283 cmd = "%s %s status | %s -v" % \
284 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
285 elif format == "deps":
286 cmd = "%s %s status | %s" % \
287 (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
288 else:
289 cmd = "%s %s list_installed | cut -d' ' -f1" % \
290 (self.opkg_cmd, self.opkg_args)
291
292 try:
293 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
294 except subprocess.CalledProcessError as e:
295 msger.error("Cannot get the installed packages list. Command '%s' "
296 "returned %d:\n%s" % (cmd, e.returncode, e.output))
297
298 if output and format == "file":
299 tmp_output = ""
300 for line in output.split('\n'):
301 pkg, pkg_file, pkg_arch = line.split()
302 full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
303 if os.path.exists(full_path):
304 tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
305 else:
306 tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
307
308 output = tmp_output
309
310 return output
311
312
313class WicPackageManager(object):
314 """
315 This is an abstract class. Do not instantiate this directly.
316 """
317 __metaclass__ = ABCMeta
318
319 def __init__(self, d, pseudo, native_sysroot):
320 self.d = d
321 self.deploy_dir = None
322 self.deploy_lock = None
323 if self.d.has_key('PACKAGE_FEED_URIS'):
324 self.feed_uris = self.d['PACKAGE_FEED_URIS']
325 else:
326 self.feed_uris = ""
327 self.pseudo = pseudo
328 self.native_sysroot = native_sysroot
329
330 """
331 Update the package manager package database.
332 """
333 @abstractmethod
334 def update(self):
335 pass
336
337 """
338 Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
339 True, installation failures are ignored.
340 """
341 @abstractmethod
342 def install(self, pkgs, attempt_only=False):
343 pass
344
345 """
346 Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
347    is False, any dependencies are left in place.
348 """
349 @abstractmethod
350 def remove(self, pkgs, with_dependencies=True):
351 pass
352
353 """
354 This function creates the index files
355 """
356 @abstractmethod
357 def write_index(self):
358 pass
359
360 @abstractmethod
361 def remove_packaging_data(self):
362 pass
363
364 @abstractmethod
365 def list_installed(self, format=None):
366 pass
367
368 @abstractmethod
369 def insert_feeds_uris(self):
370 pass
371
372 """
373 Install complementary packages based upon the list of currently installed
374 packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
375 these packages, if they don't exist then no error will occur. Note: every
376 backend needs to call this function explicitly after the normal package
377 installation
378 """
379 def install_complementary(self, globs=None):
380 # we need to write the list of installed packages to a file because the
381 # oe-pkgdata-util reads it from a file
382 if self.d.has_key('WORKDIR'):
383 installed_pkgs_file = os.path.join(self.d['WORKDIR'],
384 "installed_pkgs.txt")
385 else:
386 msger.error("No WORKDIR provided!")
387
388 with open(installed_pkgs_file, "w+") as installed_pkgs:
389 installed_pkgs.write(self.list_installed("arch"))
390
391 if globs is None:
392 if self.d.has_key('IMAGE_INSTALL_COMPLEMENTARY'):
393 globs = self.d['IMAGE_INSTALL_COMPLEMENTARY']
394 split_linguas = set()
395
396 if self.d.has_key('IMAGE_LINGUAS'):
397 for translation in self.d['IMAGE_LINGUAS'].split():
398 split_linguas.add(translation)
399 split_linguas.add(translation.split('-')[0])
400
401 split_linguas = sorted(split_linguas)
402
403 for lang in split_linguas:
404 globs += " *-locale-%s" % lang
405
406 if globs is None:
407 return
408
409 if not self.d.has_key('PKGDATA_DIR'):
410 msger.error("No PKGDATA_DIR provided!")
411
412 cmd = [which(os.getenv('PATH'), "oe-pkgdata-util"),
413 "glob", self.d['PKGDATA_DIR'], installed_pkgs_file,
414 globs]
415
416        rc, out = exec_native_cmd(self.pseudo + ' '.join(cmd), self.native_sysroot)
417 if rc != 0:
418 msger.error("Could not compute complementary packages list. Command "
419 "'%s' returned %d" %
420 (' '.join(cmd), rc))
421
422 self.install(out.split(), attempt_only=True)
423
424
425 def deploy_dir_lock(self):
426 if self.deploy_dir is None:
427 raise RuntimeError("deploy_dir is not set!")
428
429 lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
430
431 self.deploy_lock = lockfile(lock_file_name)
432
433 def deploy_dir_unlock(self):
434 if self.deploy_lock is None:
435 return
436
437 unlockfile(self.deploy_lock)
438
439 self.deploy_lock = None
440
441
442class WicOpkgPM(WicPackageManager):
443 def __init__(self, d, target_rootfs, config_file, archs, pseudo, native_sysroot, task_name='target'):
444 super(WicOpkgPM, self).__init__(d, pseudo, native_sysroot)
445
446 self.target_rootfs = target_rootfs
447 self.config_file = config_file
448 self.pkg_archs = archs
449 self.task_name = task_name
450
451 if self.d.has_key("DEPLOY_DIR_IPK"):
452 self.deploy_dir = self.d["DEPLOY_DIR_IPK"]
453
454 self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
455 self.opkg_cmd = which(os.getenv('PATH'), "opkg-cl")
456 self.opkg_args = "-f %s -o %s " % (self.config_file, target_rootfs)
457 if self.d.has_key("OPKG_ARGS"):
458 self.opkg_args += self.d["OPKG_ARGS"]
459
460 if self.d.has_key('OPKGLIBDIR'):
461 opkg_lib_dir = self.d['OPKGLIBDIR']
462 else:
463 opkg_lib_dir = ""
464
465        if opkg_lib_dir.startswith("/"):
466 opkg_lib_dir = opkg_lib_dir[1:]
467
468 self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
469
470 mkdirhier(self.opkg_dir)
471
472 if self.d.has_key("TMPDIR"):
473 tmp_dir = self.d["TMPDIR"]
474 else:
475 tmp_dir = ""
476
477 self.saved_opkg_dir = '%s/saved/%s' % (tmp_dir, self.task_name)
478 if not os.path.exists('%s/saved' % tmp_dir):
479 mkdirhier('%s/saved' % tmp_dir)
480
481 if self.d.has_key('BUILD_IMAGES_FROM_FEEDS') and self.d['BUILD_IMAGES_FROM_FEEDS'] != "1":
482 self._create_config()
483 else:
484 self._create_custom_config()
485
486 self.indexer = WicOpkgIndexer(self.d, self.deploy_dir)
487
488 """
489 This function will change a package's status in /var/lib/opkg/status file.
490 If 'packages' is None then the new_status will be applied to all
491 packages
492 """
493 def mark_packages(self, status_tag, packages=None):
494 status_file = os.path.join(self.opkg_dir, "status")
495
496 with open(status_file, "r") as sf:
497 with open(status_file + ".tmp", "w+") as tmp_sf:
498 if packages is None:
499 tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
500 r"Package: \1\n\2Status: \3%s" % status_tag,
501 sf.read()))
502 else:
503 if type(packages).__name__ != "list":
504 raise TypeError("'packages' should be a list object")
505
506 status = sf.read()
507 for pkg in packages:
508 status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
509 r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
510 status)
511
512 tmp_sf.write(status)
513
514 os.rename(status_file + ".tmp", status_file)
515
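# Editor's illustration of the substitution mark_packages() applies: the
# regex rewrites the final 'unpacked'/'installed' word of each stanza's
# Status line. A self-contained Python 2 demo on a sample stanza (the
# status tag value here is hypothetical):
import re

sample = "Package: busybox\nVersion: 1.21.1\nStatus: install ok installed\n"
print re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
             r"Package: \1\n\2Status: \3%s" % "unpacked",
             sample)
# -> Package: busybox
#    Version: 1.21.1
#    Status: install ok unpacked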
516 def _create_custom_config(self):
517 msger.info("Building from feeds activated!")
518
519 with open(self.config_file, "w+") as config_file:
520 priority = 1
521 for arch in self.pkg_archs.split():
522 config_file.write("arch %s %d\n" % (arch, priority))
523 priority += 5
524
525 if self.d.has_key('IPK_FEED_URIS'):
526 ipk_feed_uris = self.d['IPK_FEED_URIS']
527 else:
528 ipk_feed_uris = ""
529
530 for line in ipk_feed_uris.split():
531 feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
532
533 if feed_match is not None:
534 feed_name = feed_match.group(1)
535 feed_uri = feed_match.group(2)
536
537 msger.info("Add %s feed with URL %s" % (feed_name, feed_uri))
538
539 config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
540
541 """
542 Allow to use package deploy directory contents as quick devel-testing
543 feed. This creates individual feed configs for each arch subdir of those
544 specified as compatible for the current machine.
545 NOTE: Development-helper feature, NOT a full-fledged feed.
546 """
547 if self.d.has_key('FEED_DEPLOYDIR_BASE_URI'):
548 feed_deploydir_base_dir = self.d['FEED_DEPLOYDIR_BASE_URI']
549 else:
550 feed_deploydir_base_dir = ""
551
552 if feed_deploydir_base_dir != "":
553 for arch in self.pkg_archs.split():
554 if self.d.has_key("sysconfdir"):
555 sysconfdir = self.d["sysconfdir"]
556 else:
557 sysconfdir = None
558
559 cfg_file_name = os.path.join(self.target_rootfs,
560 sysconfdir,
561 "opkg",
562 "local-%s-feed.conf" % arch)
563
564 with open(cfg_file_name, "w+") as cfg_file:
565 cfg_file.write("src/gz local-%s %s/%s" %
566 arch,
567 feed_deploydir_base_dir,
568 arch)
569
570 def _create_config(self):
571 with open(self.config_file, "w+") as config_file:
572 priority = 1
573 for arch in self.pkg_archs.split():
574 config_file.write("arch %s %d\n" % (arch, priority))
575 priority += 5
576
577 config_file.write("src oe file:%s\n" % self.deploy_dir)
578
579 for arch in self.pkg_archs.split():
580 pkgs_dir = os.path.join(self.deploy_dir, arch)
581 if os.path.isdir(pkgs_dir):
582 config_file.write("src oe-%s file:%s\n" %
583 (arch, pkgs_dir))
584
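# Editor's illustration of the config that _create_config() above emits,
# using hypothetical values (archs "all armv5te qemuarm", deploy dir
# /tmp/deploy/ipk); priorities grow from generic to machine-specific archs:
priority = 1
for arch in "all armv5te qemuarm".split():
    print "arch %s %d" % (arch, priority)   # arch all 1 / arch armv5te 6 / ...
    priority += 5
print "src oe file:%s" % "/tmp/deploy/ipk"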
585 def insert_feeds_uris(self):
586 if self.feed_uris == "":
587 return
588
589 rootfs_config = os.path.join(self.target_rootfs,
590 'etc/opkg/base-feeds.conf')
591
592 with open(rootfs_config, "w+") as config_file:
593 uri_iterator = 0
594 for uri in self.feed_uris.split():
595 config_file.write("src/gz url-%d %s/ipk\n" %
596 (uri_iterator, uri))
597
598 for arch in self.pkg_archs.split():
599 if not os.path.exists(os.path.join(self.deploy_dir, arch)):
600 continue
601 msger.info('Note: adding opkg channel url-%s-%d (%s)' %
602 (arch, uri_iterator, uri))
603
604 config_file.write("src/gz uri-%s-%d %s/ipk/%s\n" %
605 (arch, uri_iterator, uri, arch))
606 uri_iterator += 1
607
608 def update(self):
609 self.deploy_dir_lock()
610
611 cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
612
613 rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
614 if rc != 0:
615 self.deploy_dir_unlock()
616 msger.error("Unable to update the package index files. Command '%s' "
617 "returned %d" % (cmd, rc))
618
619 self.deploy_dir_unlock()
620
621 def install(self, pkgs, attempt_only=False):
622 if attempt_only and len(pkgs) == 0:
623 return
624
625 cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
626
627 os.environ['D'] = self.target_rootfs
628 os.environ['OFFLINE_ROOT'] = self.target_rootfs
629 os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
630 os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
631 if self.d.has_key('WORKDIR'):
632 os.environ['INTERCEPT_DIR'] = os.path.join(self.d['WORKDIR'],
633 "intercept_scripts")
634 else:
635 os.environ['INTERCEPT_DIR'] = "."
636 msger.warning("No WORKDIR provided!")
637
638 if self.d.has_key('STAGING_DIR_NATIVE'):
639 os.environ['NATIVE_ROOT'] = self.d['STAGING_DIR_NATIVE']
640 else:
641 msger.error("No STAGING_DIR_NATIVE provided!")
642
643 rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
644 if rc != 0:
645 msger.error("Unable to install packages. "
646 "Command '%s' returned %d" % (cmd, rc))
647
648
649 def remove(self, pkgs, with_dependencies=True):
650 if with_dependencies:
651 cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
652 (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
653 else:
654 cmd = "%s %s --force-depends remove %s" % \
655 (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
656
657 rc, out = exec_native_cmd(self.pseudo + cmd, self.native_sysroot)
658 if rc != 0:
659 msger.error("Unable to remove packages. Command '%s' "
660 "returned %d" % (cmd, rc))
661
662
663 def write_index(self):
664 self.deploy_dir_lock()
665
666 result = self.indexer.write_index()
667
668 self.deploy_dir_unlock()
669
670 if result is not None:
671 msger.error(result)
672
673 def remove_packaging_data(self):
674 remove(self.opkg_dir, True)
675 # create the directory back, it's needed by PM lock
676 mkdirhier(self.opkg_dir)
677
678 def list_installed(self, format=None):
679 return WicOpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format)
680
681 def handle_bad_recommendations(self):
682 if self.d.has_key("BAD_RECOMMENDATIONS"):
683 bad_recommendations = self.d["BAD_RECOMMENDATIONS"]
684 else:
685 bad_recommendations = ""
686
687 if bad_recommendations.strip() == "":
688 return
689
690 status_file = os.path.join(self.opkg_dir, "status")
691
692 # If the status file already exists, the bad recommendations have
693 # already been handled
694 if os.path.exists(status_file):
695 return
696
697 cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
698
699 with open(status_file, "w+") as status:
700 for pkg in bad_recommendations.split():
701 pkg_info = cmd + pkg
702
703 try:
704 output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip()
705 except subprocess.CalledProcessError as e:
706 msger.error("Cannot get package info. Command '%s' "
707 "returned %d:\n%s" % (pkg_info, e.returncode, e.output))
708
709 if output == "":
710 msger.info("Ignored bad recommendation: '%s' is "
711 "not a package" % pkg)
712 continue
713
714 for line in output.split('\n'):
715 if line.startswith("Status:"):
716 status.write("Status: deinstall hold not-installed\n")
717 else:
718 status.write(line + "\n")
719
720 def dummy_install(self, pkgs):
721 '''
722 Dummy install (--noaction) the given pkgs and return the output log.
723 '''
724 if len(pkgs) == 0:
725 return
726
727 # Create a temp dir as opkg root for the dummy installation
728 if self.d.has_key("TMPDIR"):
729 tmp_dir = self.d["TMPDIR"]
730 else:
731 tmp_dir = "."
732 msger.warning("No TMPDIR provided!")
733
734 temp_rootfs = '%s/opkg' % tmp_dir
735 temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
736 mkdirhier(temp_opkg_dir)
737
738 opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
739 if self.d.has_key("OPKG_ARGS"):
740 opkg_args += self.d["OPKG_ARGS"]
741
742 cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
743 try:
744 subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
745 except subprocess.CalledProcessError as e:
746 msger.error("Unable to update. Command '%s' "
747 "returned %d:\n%s" % (cmd, e.returncode, e.output))
748
749 # Dummy installation
750 cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
751 opkg_args,
752 ' '.join(pkgs))
753 try:
754 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
755 except subprocess.CalledProcessError as e:
756 msger.error("Unable to dummy install packages. Command '%s' "
757 "returned %d:\n%s" % (cmd, e.returncode, e.output))
758
759 remove(temp_rootfs, True)
760
761 return output
762
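# Editor's sketch of how dummy_install() is typically consumed: the
# returned opkg log can be scanned to learn what a real install would pull
# in. 'pm' is a hypothetical WicOpkgPM instance:
log = pm.dummy_install(["base-files", "busybox"])
for line in (log or "").splitlines():     # None is returned for empty pkgs
    if line.startswith("Installing"):
        print line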
763 def backup_packaging_data(self):
764 # Save the opkg lib dir for incremental ipk image generation
765 if os.path.exists(self.saved_opkg_dir):
766 remove(self.saved_opkg_dir, True)
767 shutil.copytree(self.opkg_dir,
768 self.saved_opkg_dir,
769 symlinks=True)
770
771 def recover_packaging_data(self):
772 # Copy the saved opkg lib dir back
773 if os.path.exists(self.saved_opkg_dir):
774 if os.path.exists(self.opkg_dir):
775 remove(self.opkg_dir, True)
776
777 msger.info('Recover packaging data')
778 shutil.copytree(self.saved_opkg_dir,
779 self.opkg_dir,
780 symlinks=True)
781
782
783def wic_generate_index_files(d):
784 if d.has_key('PACKAGE_CLASSES'):
785 classes = d['PACKAGE_CLASSES'].replace("package_", "").split()
786 else:
787 classes = ""
788 msger.warning("No PACKAGE_CLASSES provided!")
789
790 if d.has_key('DEPLOY_DIR_IPK'):
791 deploy_dir_ipk = d['DEPLOY_DIR_IPK']
792 else:
793 deploy_dir_ipk = None
794 msger.warning("No DEPLOY_DIR_IPK provided!")
795
796 indexer_map = {
797 "ipk": (WicOpkgIndexer, deploy_dir_ipk)
798 }
799
800 result = None
801
802 for pkg_class in classes:
803 if not pkg_class in indexer_map:
804 continue
805
806 if indexer_map[pkg_class][1] and os.path.exists(indexer_map[pkg_class][1]):
807 result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
808
809 if result is not None:
810 msger.error(result)
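# Editor's sketch of a minimal invocation, with placeholder paths; 'd' only
# needs to be a dict-like object carrying the variables used above:
d = {'PACKAGE_CLASSES': 'package_ipk',
     'DEPLOY_DIR_IPK': '/tmp/deploy/ipk'}
wic_generate_index_files(d)   # runs WicOpkgIndexer.write_index() on the feed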
diff --git a/scripts/lib/mic/utils/partitionedfs.py b/scripts/lib/mic/utils/partitionedfs.py
new file mode 100644
index 0000000000..6607466a83
--- /dev/null
+++ b/scripts/lib/mic/utils/partitionedfs.py
@@ -0,0 +1,782 @@
1#!/usr/bin/python -tt
2#
3# Copyright (c) 2009, 2010, 2011 Intel, Inc.
4# Copyright (c) 2007, 2008 Red Hat, Inc.
5# Copyright (c) 2008 Daniel P. Berrange
6# Copyright (c) 2008 David P. Huff
7#
8# This program is free software; you can redistribute it and/or modify it
9# under the terms of the GNU General Public License as published by the Free
10# Software Foundation; version 2 of the License
11#
12# This program is distributed in the hope that it will be useful, but
13# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15# for more details.
16#
17# You should have received a copy of the GNU General Public License along
18# with this program; if not, write to the Free Software Foundation, Inc., 59
19# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20
21import os
22
23from mic import msger
24from mic.utils import runner
25from mic.utils.errors import MountError
26from mic.utils.fs_related import *
27from mic.utils.gpt_parser import GptParser
28from mic.utils.oe.misc import *
29
30# Overhead of the MBR partitioning scheme (just one sector)
31MBR_OVERHEAD = 1
32# Overhead of the GPT partitioning scheme
33GPT_OVERHEAD = 34
34
35# Size of a sector in bytes
36SECTOR_SIZE = 512
37
38class PartitionedMount(Mount):
39 def __init__(self, mountdir, skipformat = False):
40 Mount.__init__(self, mountdir)
41 self.disks = {}
42 self.partitions = []
43 self.subvolumes = []
44 self.mapped = False
45 self.mountOrder = []
46 self.unmountOrder = []
47 self.parted = find_binary_path("parted")
48 self.btrfscmd=None
49 self.skipformat = skipformat
50 self.snapshot_created = self.skipformat
51 # Size of a sector used in calculations
52 self.sector_size = SECTOR_SIZE
53 self._partitions_layed_out = False
54
55 def __add_disk(self, disk_name):
56 """ Add a disk 'disk_name' to the internal list of disks. Note,
57 'disk_name' is the name of the disk in the target system
58 (e.g., sdb). """
59
60 if disk_name in self.disks:
61 # We already have this disk
62 return
63
64 assert not self._partitions_layed_out
65
66 self.disks[disk_name] = \
67 { 'disk': None, # Disk object
68 'mapped': False, # True if kpartx mapping exists
69 'numpart': 0, # Number of allocated partitions
70 'partitions': [], # Indexes to self.partitions
71 'offset': 0, # Offset of next partition (in sectors)
72 # Minimum required disk size to fit all partitions (in bytes)
73 'min_size': 0,
74 'ptable_format': "msdos" } # Partition table format
75
76 def add_disk(self, disk_name, disk_obj):
77 """ Add a disk object which have to be partitioned. More than one disk
78 can be added. In case of multiple disks, disk partitions have to be
79 added for each disk separately with 'add_partition()". """
80
81 self.__add_disk(disk_name)
82 self.disks[disk_name]['disk'] = disk_obj
83
84 def __add_partition(self, part):
85 """ This is a helper function for 'add_partition()' which adds a
86 partition to the internal list of partitions. """
87
88 assert not self._partitions_layed_out
89
90 self.partitions.append(part)
91 self.__add_disk(part['disk_name'])
92
93 def add_partition(self, size, disk_name, mountpoint, source_file = None, fstype = None,
94 label=None, fsopts = None, boot = False, align = None,
95 part_type = None):
96 """ Add the next partition. Prtitions have to be added in the
97 first-to-last order. """
98
99 ks_pnum = len(self.partitions)
100
101 # Converting MB to sectors for parted
102 size = size * 1024 * 1024 / self.sector_size
103
104 # We need to handle subvolumes for btrfs
105 if fstype == "btrfs" and fsopts and fsopts.find("subvol=") != -1:
106 self.btrfscmd=find_binary_path("btrfs")
107 subvol = None
108 opts = fsopts.split(",")
109 for opt in opts:
110 if opt.find("subvol=") != -1:
111 subvol = opt.replace("subvol=", "").strip()
112 break
113 if not subvol:
114 raise MountError("No subvolume: %s" % fsopts)
115 self.subvolumes.append({'size': size, # In sectors
116 'mountpoint': mountpoint, # Mount relative to chroot
117 'fstype': fstype, # Filesystem type
118 'fsopts': fsopts, # Filesystem mount options
119 'disk_name': disk_name, # physical disk name holding partition
120 'device': None, # kpartx device node for partition
121 'mount': None, # Mount object
122 'subvol': subvol, # Subvolume name
123 'boot': boot, # Bootable flag
124 'mounted': False # Mount flag
125 })
126
127 # We still need partition for "/" or non-subvolume
128 if mountpoint == "/" or not fsopts or fsopts.find("subvol=") == -1:
129 # Don't need subvolume for "/" because it will be set as default subvolume
130 if fsopts and fsopts.find("subvol=") != -1:
131 opts = fsopts.split(",")
132 for opt in opts:
133 if opt.strip().startswith("subvol="):
134 opts.remove(opt)
135 break
136 fsopts = ",".join(opts)
137
138 part = { 'ks_pnum' : ks_pnum, # Partition number in the KS file
139 'size': size, # In sectors
140 'mountpoint': mountpoint, # Mount relative to chroot
141 'source_file': source_file, # partition contents
142 'fstype': fstype, # Filesystem type
143 'fsopts': fsopts, # Filesystem mount options
144 'label': label, # Partition label
145 'disk_name': disk_name, # physical disk name holding partition
146 'device': None, # kpartx device node for partition
147 'mount': None, # Mount object
148 'num': None, # Partition number
149 'boot': boot, # Bootable flag
150 'align': align, # Partition alignment
151 'part_type' : part_type, # Partition type
152 'partuuid': None } # Partition UUID (GPT-only)
153
154 self.__add_partition(part)
155
156 def layout_partitions(self, ptable_format = "msdos"):
157 """ Layout the partitions, meaning calculate the position of every
158 partition on the disk. The 'ptable_format' parameter defines the
159 partition table format, and may be either "msdos" or "gpt". """
160
161 msger.debug("Assigning %s partitions to disks" % ptable_format)
162
163 if ptable_format not in ('msdos', 'gpt'):
164 raise MountError("Unknown partition table format '%s', supported " \
165 "formats are: 'msdos' and 'gpt'" % ptable_format)
166
167 if self._partitions_layed_out:
168 return
169
170 self._partitions_layed_out = True
171
172 # Go through partitions in the order they are added in .ks file
173 for n in range(len(self.partitions)):
174 p = self.partitions[n]
175
176 if not self.disks.has_key(p['disk_name']):
177 raise MountError("No disk %s for partition %s" \
178 % (p['disk_name'], p['mountpoint']))
179
180 if p['part_type'] and ptable_format != 'gpt':
181 # The --part-type can also be implemented for MBR partitions,
182 # in which case it would map to the 1-byte "partition type"
183 # field at offset 3 of the partition entry.
184 raise MountError("setting custom partition type is only " \
185 "implemented for GPT partitions")
186
187 # Get the disk where the partition is located
188 d = self.disks[p['disk_name']]
189 d['numpart'] += 1
190 d['ptable_format'] = ptable_format
191
192 if d['numpart'] == 1:
193 if ptable_format == "msdos":
194 overhead = MBR_OVERHEAD
195 else:
196 overhead = GPT_OVERHEAD
197
198 # Skip the sectors required for the partitioning scheme overhead
199 d['offset'] += overhead
200 # Steal a few sectors from the first partition to compensate for
201 # the partitioning overhead
202 p['size'] -= overhead
203
204 if p['align']:
205 # If this is not the first partition and alignment is set, we
206 # need to align the partition.
207 # FIXME: This leaves empty space on the disk. To fill the
208 # gaps we could enlarge the previous partition?
209
210 # Calc how much the alignment is off.
211 align_sectors = d['offset'] % (p['align'] * 1024 / self.sector_size)
212 # We need to move forward to the next alignment point
213 align_sectors = (p['align'] * 1024 / self.sector_size) - align_sectors
214
215 msger.debug("Realignment for %s%s with %s sectors, original"
216 " offset %s, target alignment is %sK." %
217 (p['disk_name'], d['numpart'], align_sectors,
218 d['offset'], p['align']))
219
220 # Increase the offset so the partition actually starts at the right alignment
221 d['offset'] += align_sectors
222
223 p['start'] = d['offset']
224 d['offset'] += p['size']
225
226 p['type'] = 'primary'
227 p['num'] = d['numpart']
228
229 if d['ptable_format'] == "msdos":
230 if d['numpart'] > 2:
231 # Every logical partition requires an additional sector for
232 # the EBR, so steal the last sector from the end of each
233 # partition starting from the 3rd one for the EBR. This
234 # will make sure the logical partitions are aligned
235 # correctly.
236 p['size'] -= 1
237
238 if d['numpart'] > 3:
239 p['type'] = 'logical'
240 p['num'] = d['numpart'] + 1
241
242 d['partitions'].append(n)
243 msger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
244 "sectors (%d bytes)." \
245 % (p['mountpoint'], p['disk_name'], p['num'],
246 p['start'], p['start'] + p['size'] - 1,
247 p['size'], p['size'] * self.sector_size))
248
249 # Once all the partitions have been laid out, we can calculate the
250 # minimum disk sizes.
251 for disk_name, d in self.disks.items():
252 d['min_size'] = d['offset']
253 if d['ptable_format'] == 'gpt':
254 # Account for the backup partition table at the end of the disk
255 d['min_size'] += GPT_OVERHEAD
256
257 d['min_size'] *= self.sector_size
258
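# Editor's worked example of the layout math above: an msdos table with two
# 100 MB partitions, no alignment. Python 2 arithmetic, mirroring the code:
SECTOR_SIZE = 512
MBR_OVERHEAD = 1
size = 100 * 1024 * 1024 / SECTOR_SIZE      # 204800 sectors per partition
offset = 0

offset += MBR_OVERHEAD                      # sector 0 holds the MBR
p1_start = offset                           # 1
p1_size = size - MBR_OVERHEAD               # 204799: first partition absorbs the overhead
offset += p1_size                           # 204800

p2_start = offset                           # 204800
offset += size                              # 409600

min_size = offset * SECTOR_SIZE             # 209715200 bytes = exactly 200 MiB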
259 def __run_parted(self, args):
260 """ Run parted with arguments specified in the 'args' list. """
261
262 args.insert(0, self.parted)
263 msger.debug(args)
264
265 rc, out = runner.runtool(args, catch = 3)
266 out = out.strip()
267 if out:
268 msger.debug('"parted" output: %s' % out)
269
270 if rc != 0:
271 # We don't throw exception when return code is not 0, because
272 # parted always fails to reload part table with loop devices. This
273 # prevents us from distinguishing real errors based on return
274 # code.
275 msger.debug("WARNING: parted returned '%s' instead of 0" % rc)
276
277 def __create_partition(self, device, parttype, fstype, start, size):
278 """ Create a partition on an image described by the 'device' object. """
279
280 # Start is included in the size, so we need to subtract one to get the end.
281 end = start + size - 1
282 msger.debug("Added '%s' partition, sectors %d-%d, size %d sectors" %
283 (parttype, start, end, size))
284
285 args = ["-s", device, "unit", "s", "mkpart", parttype]
286 if fstype:
287 args.extend([fstype])
288 args.extend(["%d" % start, "%d" % end])
289
290 return self.__run_parted(args)
291
292 def __format_disks(self):
293 self.layout_partitions()
294
295 if self.skipformat:
296 msger.debug("Skipping disk format, because skipformat flag is set.")
297 return
298
299 for dev in self.disks.keys():
300 d = self.disks[dev]
301 msger.debug("Initializing partition table for %s" % \
302 (d['disk'].device))
303 self.__run_parted(["-s", d['disk'].device, "mklabel",
304 d['ptable_format']])
305
306 msger.debug("Creating partitions")
307
308 for p in self.partitions:
309 d = self.disks[p['disk_name']]
310 if d['ptable_format'] == "msdos" and p['num'] == 5:
311 # The last sector of the 3rd partition was reserved for the EBR
312 # of the first _logical_ partition. This is why the extended
313 # partition should start one sector before the first logical
314 # partition.
315 self.__create_partition(d['disk'].device, "extended",
316 None, p['start'] - 1,
317 d['offset'] - p['start'])
318
319 if p['fstype'] == "swap":
320 parted_fs_type = "linux-swap"
321 elif p['fstype'] == "vfat":
322 parted_fs_type = "fat32"
323 elif p['fstype'] == "msdos":
324 parted_fs_type = "fat16"
325 else:
326 # Type for ext2/ext3/ext4/btrfs
327 parted_fs_type = "ext2"
328
329 # The boot ROM of OMAP boards requires the vfat boot partition to
330 # have an even number of sectors.
331 if p['mountpoint'] == "/boot" and p['fstype'] in ["vfat", "msdos"] \
332 and p['size'] % 2:
333 msger.debug("Substracting one sector from '%s' partition to " \
334 "get even number of sectors for the partition" % \
335 p['mountpoint'])
336 p['size'] -= 1
337
338 self.__create_partition(d['disk'].device, p['type'],
339 parted_fs_type, p['start'], p['size'])
340
341 if p['boot']:
342 if d['ptable_format'] == 'gpt':
343 flag_name = "legacy_boot"
344 else:
345 flag_name = "boot"
346 msger.debug("Set '%s' flag for partition '%s' on disk '%s'" % \
347 (flag_name, p['num'], d['disk'].device))
348 self.__run_parted(["-s", d['disk'].device, "set",
349 "%d" % p['num'], flag_name, "on"])
350
351 # Parted defaults to enabling the lba flag for fat16 partitions,
352 # which causes compatibility issues with some firmware (and really
353 # isn't necessary).
354 if parted_fs_type == "fat16":
355 if d['ptable_format'] == 'msdos':
356 msger.debug("Disable 'lba' flag for partition '%s' on disk '%s'" % \
357 (p['num'], d['disk'].device))
358 self.__run_parted(["-s", d['disk'].device, "set",
359 "%d" % p['num'], "lba", "off"])
360
361 # If the partition table format is "gpt", find out PARTUUIDs for all
362 # the partitions. And if users specified custom partition type UUIDs,
363 # set them.
364 for disk_name, disk in self.disks.items():
365 if disk['ptable_format'] != 'gpt':
366 continue
367
368 pnum = 0
369 gpt_parser = GptParser(disk['disk'].device, SECTOR_SIZE)
370 # Iterate over all GPT partitions on this disk
371 for entry in gpt_parser.get_partitions():
372 pnum += 1
373 # Find the matching partition in the 'self.partitions' list
374 for n in disk['partitions']:
375 p = self.partitions[n]
376 if p['num'] == pnum:
377 # Found, fetch PARTUUID (partition's unique ID)
378 p['partuuid'] = entry['part_uuid']
379 msger.debug("PARTUUID for partition %d on disk '%s' " \
380 "(mount point '%s') is '%s'" % (pnum, \
381 disk_name, p['mountpoint'], p['partuuid']))
382 if p['part_type']:
383 entry['type_uuid'] = p['part_type']
384 msger.debug("Change type of partition %d on disk " \
385 "'%s' (mount point '%s') to '%s'" % \
386 (pnum, disk_name, p['mountpoint'],
387 p['part_type']))
388 gpt_parser.change_partition(entry)
389
390 del gpt_parser
391
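# Editor's note: __create_partition() above reduces to one parted call. For
# the first partition from the worked example (primary, ext2-typed, start
# sector 1, 204799 sectors) and a placeholder device node, the generated
# command would be:
device, parttype, fstype, start, size = "/dev/sdb", "primary", "ext2", 1, 204799
end = start + size - 1                      # parted treats the end sector as inclusive
args = ["-s", device, "unit", "s", "mkpart", parttype, fstype,
        "%d" % start, "%d" % end]
print "parted " + " ".join(args)
# parted -s /dev/sdb unit s mkpart primary ext2 1 204799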
392 def __map_partitions(self):
393 """Load it if dm_snapshot isn't loaded. """
394 load_module("dm_snapshot")
395
396 for dev in self.disks.keys():
397 d = self.disks[dev]
398 if d['mapped']:
399 continue
400
401 msger.debug("Running kpartx on %s" % d['disk'].device )
402 rc, kpartxOutput = runner.runtool([self.kpartx, "-l", "-v", d['disk'].device])
403 kpartxOutput = kpartxOutput.splitlines()
404
405 if rc != 0:
406 raise MountError("Failed to query partition mapping for '%s'" %
407 d['disk'].device)
408
409 # Drop kpartx's verbose preamble, keep only the loop-device mapping lines
410 i = 0
411 while i < len(kpartxOutput) and kpartxOutput[i][0:4] != "loop":
412 i = i + 1
413 kpartxOutput = kpartxOutput[i:]
414
415 # Make sure kpartx reported the right count of partitions
416 if len(kpartxOutput) != d['numpart']:
417 # If this disk has more than 3 partitions, then in case of MBR
418 # partitions there is an extended partition. Different versions
419 # of kpartx behave differently WRT the extended partition -
420 # some map it, some ignore it. This is why we do the below hack
421 # - if kpartx reported one more partition and the partition
422 # table type is "msdos" and the number of partitions is more
423 # than 3, we just assume kpartx mapped the extended partition
424 # and we remove it.
425 if len(kpartxOutput) == d['numpart'] + 1 \
426 and d['ptable_format'] == 'msdos' and len(kpartxOutput) > 3:
427 kpartxOutput.pop(3)
428 else:
429 raise MountError("Unexpected number of partitions from " \
430 "kpartx: %d != %d" % \
431 (len(kpartxOutput), d['numpart']))
432
433 for i in range(len(kpartxOutput)):
434 line = kpartxOutput[i]
435 newdev = line.split()[0]
436 mapperdev = "/dev/mapper/" + newdev
437 loopdev = d['disk'].device + newdev[-1]
438
439 msger.debug("Dev %s: %s -> %s" % (newdev, loopdev, mapperdev))
440 pnum = d['partitions'][i]
441 self.partitions[pnum]['device'] = loopdev
442
443 # grub's install wants partitions to be named
444 # to match their parent device + partition num
445 # kpartx doesn't work like this, so we add compat
446 # symlinks to point to /dev/mapper
447 if os.path.lexists(loopdev):
448 os.unlink(loopdev)
449 os.symlink(mapperdev, loopdev)
450
451 msger.debug("Adding partx mapping for %s" % d['disk'].device)
452 rc = runner.show([self.kpartx, "-v", "-a", d['disk'].device])
453
454 if rc != 0:
455 # Make sure that the device maps are also removed on error case.
456 # The d['mapped'] isn't set to True if the kpartx fails so
457 # failed mapping will not be cleaned on cleanup either.
458 runner.quiet([self.kpartx, "-d", d['disk'].device])
459 raise MountError("Failed to map partitions for '%s'" %
460 d['disk'].device)
461
462 # FIXME: there is a short delay before the device-mapper setup
463 # completes; wait 10 seconds for it
464 import time
465 time.sleep(10)
466 d['mapped'] = True
467
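# Editor's note: __map_partitions() and __unmap_partitions() reference
# self.kpartx, and the subvolume code further down relies on self.mountcmd
# and self.umountcmd, yet the __init__ shown above only resolves 'parted'.
# If these code paths are exercised, the constructor presumably needs to
# resolve those tools too -- a sketch of the assumed missing lines:
#
#     self.kpartx = find_binary_path("kpartx")
#     self.mountcmd = find_binary_path("mount")
#     self.umountcmd = find_binary_path("umount")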
468 def __unmap_partitions(self):
469 for dev in self.disks.keys():
470 d = self.disks[dev]
471 if not d['mapped']:
472 continue
473
474 msger.debug("Removing compat symlinks")
475 for pnum in d['partitions']:
476 if self.partitions[pnum]['device'] != None:
477 os.unlink(self.partitions[pnum]['device'])
478 self.partitions[pnum]['device'] = None
479
480 msger.debug("Unmapping %s" % d['disk'].device)
481 rc = runner.quiet([self.kpartx, "-d", d['disk'].device])
482 if rc != 0:
483 raise MountError("Failed to unmap partitions for '%s'" %
484 d['disk'].device)
485
486 d['mapped'] = False
487
488 def __calculate_mountorder(self):
489 msger.debug("Calculating mount order")
490 for p in self.partitions:
491 if p['mountpoint']:
492 self.mountOrder.append(p['mountpoint'])
493 self.unmountOrder.append(p['mountpoint'])
494
495 self.mountOrder.sort()
496 self.unmountOrder.sort()
497 self.unmountOrder.reverse()
498
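# Editor's illustration: plain lexicographic sorting is enough here because
# a parent mountpoint is always a prefix of, and therefore sorts before,
# anything nested under it:
mount_order = sorted(['/boot', '/', '/usr', '/usr/local'])
print mount_order        # ['/', '/boot', '/usr', '/usr/local']
print mount_order[::-1]  # unmount order: deepest mountpoints first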
499 def cleanup(self):
500 Mount.cleanup(self)
501 if self.disks:
502 self.__unmap_partitions()
503 for dev in self.disks.keys():
504 d = self.disks[dev]
505 try:
506 d['disk'].cleanup()
507 except:
508 pass
509
510 def unmount(self):
511 self.__unmount_subvolumes()
512 for mp in self.unmountOrder:
513 if mp == 'swap':
514 continue
515 p = None
516 for p1 in self.partitions:
517 if p1['mountpoint'] == mp:
518 p = p1
519 break
520
521 if p['mount'] != None:
522 try:
523 # Create subvolume snapshot here
524 if p['fstype'] == "btrfs" and p['mountpoint'] == "/" and not self.snapshot_created:
525 self.__create_subvolume_snapshots(p, p["mount"])
526 p['mount'].cleanup()
527 except:
528 pass
529 p['mount'] = None
530
531 # Only for btrfs
532 def __get_subvolume_id(self, rootpath, subvol):
533 if not self.btrfscmd:
534 self.btrfscmd=find_binary_path("btrfs")
535 argv = [ self.btrfscmd, "subvolume", "list", rootpath ]
536
537 rc, out = runner.runtool(argv)
538 msger.debug(out)
539
540 if rc != 0:
541 raise MountError("Failed to get subvolume id from %s', return code: %d." % (rootpath, rc))
542
543 subvolid = -1
544 for line in out.splitlines():
545 if line.endswith(" path %s" % subvol):
546 subvolid = line.split()[1]
547 if not subvolid.isdigit():
548 raise MountError("Invalid subvolume id: %s" % subvolid)
549 subvolid = int(subvolid)
550 break
551 return subvolid
552
553 def __create_subvolume_metadata(self, p, pdisk):
554 if len(self.subvolumes) == 0:
555 return
556
557 argv = [ self.btrfscmd, "subvolume", "list", pdisk.mountdir ]
558 rc, out = runner.runtool(argv)
559 msger.debug(out)
560
561 if rc != 0:
562 raise MountError("Failed to get subvolume id from %s', return code: %d." % (pdisk.mountdir, rc))
563
564 subvolid_items = out.splitlines()
565 subvolume_metadata = ""
566 for subvol in self.subvolumes:
567 for line in subvolid_items:
568 if line.endswith(" path %s" % subvol["subvol"]):
569 subvolid = line.split()[1]
570 if not subvolid.isdigit():
571 raise MountError("Invalid subvolume id: %s" % subvolid)
572
573 subvolid = int(subvolid)
574 opts = subvol["fsopts"].split(",")
575 for opt in opts:
576 if opt.strip().startswith("subvol="):
577 opts.remove(opt)
578 break
579 fsopts = ",".join(opts)
580 subvolume_metadata += "%d\t%s\t%s\t%s\n" % (subvolid, subvol["subvol"], subvol['mountpoint'], fsopts)
581
582 if subvolume_metadata:
583 fd = open("%s/.subvolume_metadata" % pdisk.mountdir, "w")
584 fd.write(subvolume_metadata)
585 fd.close()
586
587 def __get_subvolume_metadata(self, p, pdisk):
588 subvolume_metadata_file = "%s/.subvolume_metadata" % pdisk.mountdir
589 if not os.path.exists(subvolume_metadata_file):
590 return
591
592 fd = open(subvolume_metadata_file, "r")
593 content = fd.read()
594 fd.close()
595
596 for line in content.splitlines():
597 items = line.split("\t")
598 if items and len(items) == 4:
599 self.subvolumes.append({'size': 0, # In sectors
600 'mountpoint': items[2], # Mount relative to chroot
601 'fstype': "btrfs", # Filesystem type
602 'fsopts': items[3] + ",subvol=%s" % items[1], # Filesystem mount options
603 'disk_name': p['disk_name'], # physical disk name holding partition
604 'device': None, # kpartx device node for partition
605 'mount': None, # Mount object
606 'subvol': items[1], # Subvolume name
607 'boot': False, # Bootable flag
608 'mounted': False # Mount flag
609 })
610
611 def __create_subvolumes(self, p, pdisk):
612 """ Create all the subvolumes. """
613
614 for subvol in self.subvolumes:
615 argv = [ self.btrfscmd, "subvolume", "create", pdisk.mountdir + "/" + subvol["subvol"]]
616
617 rc = runner.show(argv)
618 if rc != 0:
619 raise MountError("Failed to create subvolume '%s', return code: %d." % (subvol["subvol"], rc))
620
621 # Set default subvolume, subvolume for "/" is default
622 subvol = None
623 for subvolume in self.subvolumes:
624 if subvolume["mountpoint"] == "/" and p['disk_name'] == subvolume['disk_name']:
625 subvol = subvolume
626 break
627
628 if subvol:
629 # Get default subvolume id
630 subvolid = self.__get_subvolume_id(pdisk.mountdir, subvol["subvol"])
631 # Set default subvolume
632 if subvolid != -1:
633 rc = runner.show([ self.btrfscmd, "subvolume", "set-default", "%d" % subvolid, pdisk.mountdir])
634 if rc != 0:
635 raise MountError("Failed to set default subvolume id: %d', return code: %d." % (subvolid, rc))
636
637 self.__create_subvolume_metadata(p, pdisk)
638
639 def __mount_subvolumes(self, p, pdisk):
640 if self.skipformat:
641 # Get subvolume info
642 self.__get_subvolume_metadata(p, pdisk)
643 # Set default mount options
644 if len(self.subvolumes) != 0:
645 for subvol in self.subvolumes:
646 if subvol["mountpoint"] == p["mountpoint"] == "/":
647 opts = subvol["fsopts"].split(",")
648 for opt in opts:
649 if opt.strip().startswith("subvol="):
650 opts.remove(opt)
651 break
652 pdisk.fsopts = ",".join(opts)
653 break
654
655 if len(self.subvolumes) == 0:
656 # Return directly if no subvolumes
657 return
658
659 # Remount to make default subvolume mounted
660 rc = runner.show([self.umountcmd, pdisk.mountdir])
661 if rc != 0:
662 raise MountError("Failed to umount %s" % pdisk.mountdir)
663
664 rc = runner.show([self.mountcmd, "-o", pdisk.fsopts, pdisk.disk.device, pdisk.mountdir])
665 if rc != 0:
666 raise MountError("Failed to umount %s" % pdisk.mountdir)
667
668 for subvol in self.subvolumes:
669 if subvol["mountpoint"] == "/":
670 continue
671 subvolid = self.__get_subvolume_id(pdisk.mountdir, subvol["subvol"])
672 if subvolid == -1:
673 msger.debug("WARNING: invalid subvolume %s" % subvol["subvol"])
674 continue
675 # Replace subvolume name with subvolume ID
676 opts = subvol["fsopts"].split(",")
677 for opt in opts:
678 if opt.strip().startswith("subvol="):
679 opts.remove(opt)
680 break
681
682 opts.extend(["subvolrootid=0", "subvol=%s" % subvol["subvol"]])
683 fsopts = ",".join(opts)
684 subvol['fsopts'] = fsopts
685 mountpoint = self.mountdir + subvol['mountpoint']
686 makedirs(mountpoint)
687 rc = runner.show([self.mountcmd, "-o", fsopts, pdisk.disk.device, mountpoint])
688 if rc != 0:
689 raise MountError("Failed to mount subvolume %s to %s" % (subvol["subvol"], mountpoint))
690 subvol["mounted"] = True
691
692 def __unmount_subvolumes(self):
693 """ It may be called multiple times, so we need to chekc if it is still mounted. """
694 for subvol in self.subvolumes:
695 if subvol["mountpoint"] == "/":
696 continue
697 if not subvol["mounted"]:
698 continue
699 mountpoint = self.mountdir + subvol['mountpoint']
700 rc = runner.show([self.umountcmd, mountpoint])
701 if rc != 0:
702 raise MountError("Failed to unmount subvolume %s from %s" % (subvol["subvol"], mountpoint))
703 subvol["mounted"] = False
704
705 def __create_subvolume_snapshots(self, p, pdisk):
706 import time
707
708 if self.snapshot_created:
709 return
710
711 # Remount with subvolid=0
712 rc = runner.show([self.umountcmd, pdisk.mountdir])
713 if rc != 0:
714 raise MountError("Failed to umount %s" % pdisk.mountdir)
715 if pdisk.fsopts:
716 mountopts = pdisk.fsopts + ",subvolid=0"
717 else:
718 mountopts = "subvolid=0"
719 rc = runner.show([self.mountcmd, "-o", mountopts, pdisk.disk.device, pdisk.mountdir])
720 if rc != 0:
721 raise MountError("Failed to umount %s" % pdisk.mountdir)
722
723 # Create all the subvolume snapshots
724 snapshotts = time.strftime("%Y%m%d-%H%M")
725 for subvol in self.subvolumes:
726 subvolpath = pdisk.mountdir + "/" + subvol["subvol"]
727 snapshotpath = subvolpath + "_%s-1" % snapshotts
728 rc = runner.show([ self.btrfscmd, "subvolume", "snapshot", subvolpath, snapshotpath ])
729 if rc != 0:
730 raise MountError("Failed to create subvolume snapshot '%s' for '%s', return code: %d." % (snapshotpath, subvolpath, rc))
731
732 self.snapshot_created = True
733
734 def __install_partition(self, num, source_file, start, size):
735 """
736 Install source_file contents into a partition.
737 """
738 if not source_file: # nothing to install
739 return
740
741 # Start is included in the size, so we need to subtract one to get the end.
742 end = start + size - 1
743 msger.debug("Installed %s in partition %d, sectors %d-%d, size %d sectors" % (source_file, num, start, end, size))
744
745 dd_cmd = "dd if=%s of=%s bs=%d seek=%d count=%d conv=notrunc" % \
746 (source_file, self.image_file, self.sector_size, start, size)
747 rc, out = exec_cmd(dd_cmd)
748
749
750 def install(self, image_file):
751 msger.debug("Installing partitions")
752
753 self.image_file = image_file
754
755 for p in self.partitions:
756 d = self.disks[p['disk_name']]
757 if d['ptable_format'] == "msdos" and p['num'] == 5:
758 # The last sector of the 3rd partition was reserved for the EBR
759 # of the first _logical_ partition. This is why the extended
760 # partition should start one sector before the first logical
761 # partition.
762 self.__install_partition(p['num'], p['source_file'],
763 p['start'] - 1,
764 d['offset'] - p['start'])
765
766 self.__install_partition(p['num'], p['source_file'],
767 p['start'], p['size'])
768
769 def mount(self):
770 for dev in self.disks.keys():
771 d = self.disks[dev]
772 d['disk'].create()
773
774 self.__format_disks()
775
776 self.__calculate_mountorder()
777
778 return
779
780 def resparse(self, size = None):
781 # Can't re-sparse a disk image - too hard
782 pass
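# Editor's usage sketch for this class, with hypothetical names ('disk_obj'
# would come from fs_related, sizes are in MB, paths are placeholders):
pm = PartitionedMount("/tmp/wic-mnt")
pm.add_disk("sda", disk_obj)
pm.add_partition(32, "sda", "/boot", source_file="/tmp/boot.img",
                 fstype="vfat", boot=True)
pm.add_partition(256, "sda", "/", source_file="/tmp/rootfs.img",
                 fstype="ext3")
pm.mount()                    # creates the disk image and partition table
pm.install("/tmp/wic.img")    # dd's each source_file into its partition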
diff --git a/scripts/lib/mic/utils/proxy.py b/scripts/lib/mic/utils/proxy.py
new file mode 100644
index 0000000000..91451a2d01
--- /dev/null
+++ b/scripts/lib/mic/utils/proxy.py
@@ -0,0 +1,183 @@
1#!/usr/bin/python -tt
2#
3# Copyright (c) 2010, 2011 Intel, Inc.
4#
5# This program is free software; you can redistribute it and/or modify it
6# under the terms of the GNU General Public License as published by the Free
7# Software Foundation; version 2 of the License
8#
9# This program is distributed in the hope that it will be useful, but
10# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12# for more details.
13#
14# You should have received a copy of the GNU General Public License along
15# with this program; if not, write to the Free Software Foundation, Inc., 59
16# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18import os
19import urlparse
20
21_my_proxies = {}
22_my_noproxy = None
23_my_noproxy_list = []
24
25def set_proxy_environ():
26 global _my_noproxy, _my_proxies
27 if not _my_proxies:
28 return
29 for key in _my_proxies.keys():
30 os.environ[key + "_proxy"] = _my_proxies[key]
31 if not _my_noproxy:
32 return
33 os.environ["no_proxy"] = _my_noproxy
34
35def unset_proxy_environ():
36 for env in ('http_proxy',
37 'https_proxy',
38 'ftp_proxy',
39 'all_proxy'):
40 if env in os.environ:
41 del os.environ[env]
42
43 ENV=env.upper()
44 if ENV in os.environ:
45 del os.environ[ENV]
46
47def _set_proxies(proxy = None, no_proxy = None):
48 """Return a dictionary of scheme -> proxy server URL mappings.
49 """
50
51 global _my_noproxy, _my_proxies
52 _my_proxies = {}
53 _my_noproxy = None
54 proxies = []
55 if proxy:
56 proxies.append(("http_proxy", proxy))
57 if no_proxy:
58 proxies.append(("no_proxy", no_proxy))
59
60 # Get proxy settings from environment if not provided
61 if not proxy and not no_proxy:
62 proxies = os.environ.items()
63
64 # Remove proxy env variables, urllib2 can't handle them correctly
65 unset_proxy_environ()
66
67 for name, value in proxies:
68 name = name.lower()
69 if value and name[-6:] == '_proxy':
70 if name[0:2] != "no":
71 _my_proxies[name[:-6]] = value
72 else:
73 _my_noproxy = value
74
75def _ip_to_int(ip):
76 ipint=0
77 shift=24
78 for dec in ip.split("."):
79 ipint |= int(dec) << shift
80 shift -= 8
81 return ipint
82
83def _int_to_ip(val):
84 ipaddr=""
85 shift=0
86 for i in range(4):
87 dec = val >> shift
88 dec &= 0xff
89 ipaddr = ".%d%s" % (dec, ipaddr)
90 shift += 8
91 return ipaddr[1:]
92
93def _isip(host):
94 if host.replace(".", "").isdigit():
95 return True
96 return False
97
98def _set_noproxy_list():
99 global _my_noproxy, _my_noproxy_list
100 _my_noproxy_list = []
101 if not _my_noproxy:
102 return
103 for item in _my_noproxy.split(","):
104 item = item.strip()
105 if not item:
106 continue
107
108 if item[0] != '.' and item.find("/") == -1:
109 # Need to match it
110 _my_noproxy_list.append({"match":0,"needle":item})
111
112 elif item[0] == '.':
113 # Need to match at tail
114 _my_noproxy_list.append({"match":1,"needle":item})
115
116 elif item.find("/") > 3:
117 # IP/MASK, need to match at head
118 needle = item[0:item.find("/")].strip()
119 ip = _ip_to_int(needle)
120 netmask = 0
121 mask = item[item.find("/")+1:].strip()
122
123 if mask.isdigit():
124 netmask = int(mask)
125 netmask = ~((1<<(32-netmask)) - 1)
126 ip &= netmask
127 else:
128 shift=24
129 netmask=0
130 for dec in mask.split("."):
131 netmask |= int(dec) << shift
132 shift -= 8
133 ip &= netmask
134
135 _my_noproxy_list.append({"match":2,"needle":ip,"netmask":netmask})
136
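# Editor's worked example of the IP/MASK branch above, for a no_proxy entry
# of "10.0.0.0/8" (reusing this module's _ip_to_int()):
needle = _ip_to_int("10.0.0.0")
netmask = ~((1 << (32 - 8)) - 1)        # keep only the top 8 bits
needle &= netmask                       # 0x0A000000
host = _ip_to_int("10.3.2.1")
print (host & netmask) == needle        # True: the host is inside 10.0.0.0/8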
137def _isnoproxy(url):
138 (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
139
140 if '@' in host:
141 user_pass, host = host.split('@', 1)
142
143 if ':' in host:
144 host, port = host.split(':', 1)
145
146 hostisip = _isip(host)
147 for item in _my_noproxy_list:
148 if hostisip and item["match"] <= 1:
149 continue
150
151 if item["match"] == 2 and hostisip:
152 if (_ip_to_int(host) & item["netmask"]) == item["needle"]:
153 return True
154
155 if item["match"] == 0:
156 if host == item["needle"]:
157 return True
158
159 if item["match"] == 1:
160 if host.rfind(item["needle"]) > 0:
161 return True
162
163 return False
164
165def set_proxies(proxy = None, no_proxy = None):
166 _set_proxies(proxy, no_proxy)
167 _set_noproxy_list()
168 set_proxy_environ()
169
170def get_proxy_for(url):
171 if url.startswith('file:') or _isnoproxy(url):
172 return None
173
174 type = url[0:url.index(":")]
175 proxy = None
176 if _my_proxies.has_key(type):
177 proxy = _my_proxies[type]
178 elif _my_proxies.has_key("http"):
179 proxy = _my_proxies["http"]
180 else:
181 proxy = None
182
183 return proxy
diff --git a/scripts/lib/mic/utils/rpmmisc.py b/scripts/lib/mic/utils/rpmmisc.py
new file mode 100644
index 0000000000..af15763e18
--- /dev/null
+++ b/scripts/lib/mic/utils/rpmmisc.py
@@ -0,0 +1,600 @@
1#!/usr/bin/python -tt
2#
3# Copyright (c) 2008, 2009, 2010, 2011 Intel, Inc.
4#
5# This program is free software; you can redistribute it and/or modify it
6# under the terms of the GNU General Public License as published by the Free
7# Software Foundation; version 2 of the License
8#
9# This program is distributed in the hope that it will be useful, but
10# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12# for more details.
13#
14# You should have received a copy of the GNU General Public License along
15# with this program; if not, write to the Free Software Foundation, Inc., 59
16# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18import os
19import sys
20import re
21import rpm
22
23from mic import msger
24from mic.utils.errors import CreatorError
25from mic.utils.proxy import get_proxy_for
26from mic.utils import runner
27
28
29class RPMInstallCallback:
30 """ Command line callback class for callbacks from the RPM library.
31 """
32
33 def __init__(self, ts, output=1):
34 self.output = output
35 self.callbackfilehandles = {}
36 self.total_actions = 0
37 self.total_installed = 0
38 self.installed_pkg_names = []
39 self.total_removed = 0
40 self.mark = "+"
41 self.marks = 40
42 self.lastmsg = None
43 self.tsInfo = None # this needs to be set for anything else to work
44 self.ts = ts
45 self.filelog = False
46 self.logString = []
47 self.headmsg = "Installing"
48
49 def _dopkgtup(self, hdr):
50 tmpepoch = hdr['epoch']
51 if tmpepoch is None: epoch = '0'
52 else: epoch = str(tmpepoch)
53
54 return (hdr['name'], hdr['arch'], epoch, hdr['version'], hdr['release'])
55
56 def _makeHandle(self, hdr):
57 handle = '%s:%s.%s-%s-%s' % (hdr['epoch'], hdr['name'], hdr['version'],
58 hdr['release'], hdr['arch'])
59
60 return handle
61
62 def _localprint(self, msg):
63 if self.output:
64 msger.info(msg)
65
66 def _makefmt(self, percent, progress = True):
67 l = len(str(self.total_actions))
68 size = "%s.%s" % (l, l)
69 fmt_done = "[%" + size + "s/%" + size + "s]"
70 done = fmt_done % (self.total_installed + self.total_removed,
71 self.total_actions)
72 marks = self.marks - (2 * l)
73 width = "%s.%s" % (marks, marks)
74 fmt_bar = "%-" + width + "s"
75 if progress:
76 bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), )
77 fmt = "\r %-10.10s: %-20.20s " + bar + " " + done
78 else:
79 bar = fmt_bar % (self.mark * marks, )
80 fmt = " %-10.10s: %-20.20s " + bar + " " + done
81 return fmt
82
83 def _logPkgString(self, hdr):
84 """return nice representation of the package for the log"""
85 (n,a,e,v,r) = self._dopkgtup(hdr)
86 if e == '0':
87 pkg = '%s.%s %s-%s' % (n, a, v, r)
88 else:
89 pkg = '%s.%s %s:%s-%s' % (n, a, e, v, r)
90
91 return pkg
92
93 def callback(self, what, bytes, total, h, user):
94 if what == rpm.RPMCALLBACK_TRANS_START:
95 if bytes == 6:
96 self.total_actions = total
97
98 elif what == rpm.RPMCALLBACK_TRANS_PROGRESS:
99 pass
100
101 elif what == rpm.RPMCALLBACK_TRANS_STOP:
102 pass
103
104 elif what == rpm.RPMCALLBACK_INST_OPEN_FILE:
105 self.lastmsg = None
106 hdr = None
107 if h is not None:
108 try:
109 hdr, rpmloc = h
110 except:
111 rpmloc = h
112 hdr = readRpmHeader(self.ts, h)
113
114 handle = self._makeHandle(hdr)
115 fd = os.open(rpmloc, os.O_RDONLY)
116 self.callbackfilehandles[handle]=fd
117 if hdr['name'] not in self.installed_pkg_names:
118 self.installed_pkg_names.append(hdr['name'])
119 self.total_installed += 1
120 return fd
121 else:
122 self._localprint("No header - huh?")
123
124 elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
125 hdr = None
126 if h is not None:
127 try:
128 hdr, rpmloc = h
129 except:
130 rpmloc = h
131 hdr = readRpmHeader(self.ts, h)
132
133 handle = self._makeHandle(hdr)
134 os.close(self.callbackfilehandles[handle])
135 fd = 0
136
137 # log stuff
138 #pkgtup = self._dopkgtup(hdr)
139 self.logString.append(self._logPkgString(hdr))
140
141 elif what == rpm.RPMCALLBACK_INST_PROGRESS:
142 if h is not None:
143 percent = (self.total_installed*100L)/self.total_actions
144 if total > 0:
145 try:
146 hdr, rpmloc = h
147 except:
148 rpmloc = h
149
150 m = re.match("(.*)-(\d+.*)-(\d+\.\d+)\.(.+)\.rpm", os.path.basename(rpmloc))
151 if m:
152 pkgname = m.group(1)
153 else:
154 pkgname = os.path.basename(rpmloc)
155 if self.output:
156 fmt = self._makefmt(percent)
157 msg = fmt % (self.headmsg, pkgname)
158 if msg != self.lastmsg:
159 self.lastmsg = msg
160
161 msger.info(msg)
162
163 if self.total_installed == self.total_actions:
164 msger.raw('')
165 msger.verbose('\n'.join(self.logString))
166
167 elif what == rpm.RPMCALLBACK_UNINST_START:
168 pass
169
170 elif what == rpm.RPMCALLBACK_UNINST_PROGRESS:
171 pass
172
173 elif what == rpm.RPMCALLBACK_UNINST_STOP:
174 self.total_removed += 1
175
176 elif what == rpm.RPMCALLBACK_REPACKAGE_START:
177 pass
178
179 elif what == rpm.RPMCALLBACK_REPACKAGE_STOP:
180 pass
181
182 elif what == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
183 pass
184
185def readRpmHeader(ts, filename):
186 """ Read an rpm header. """
187
188 fd = os.open(filename, os.O_RDONLY)
189 h = ts.hdrFromFdno(fd)
190 os.close(fd)
191 return h
192
193def splitFilename(filename):
194 """ Pass in a standard style rpm fullname
195
196 Return a name, version, release, epoch, arch, e.g.::
197 foo-1.0-1.i386.rpm returns foo, 1.0, 1, '', i386
198 1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
199 """
200
201 if filename[-4:] == '.rpm':
202 filename = filename[:-4]
203
204 archIndex = filename.rfind('.')
205 arch = filename[archIndex+1:]
206
207 relIndex = filename[:archIndex].rfind('-')
208 rel = filename[relIndex+1:archIndex]
209
210 verIndex = filename[:relIndex].rfind('-')
211 ver = filename[verIndex+1:relIndex]
212
213 epochIndex = filename.find(':')
214 if epochIndex == -1:
215 epoch = ''
216 else:
217 epoch = filename[:epochIndex]
218
219 name = filename[epochIndex + 1:verIndex]
220 return name, ver, rel, epoch, arch
221
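# Editor's usage example for splitFilename(), matching the docstring:
print splitFilename("foo-1.0-1.i386.rpm")
# ('foo', '1.0', '1', '', 'i386')
print splitFilename("1:bar-9-123a.ia64.rpm")
# ('bar', '9', '123a', '1', 'ia64')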
222def getCanonX86Arch(arch):
224 if arch == "i586":
225 f = open("/proc/cpuinfo", "r")
226 lines = f.readlines()
227 f.close()
228 for line in lines:
229 if line.startswith("model name") and line.find("Geode(TM)") != -1:
230 return "geode"
231 return arch
232 # only athlon vs i686 isn't handled with uname currently
233 if arch != "i686":
234 return arch
235
236 # if we're i686 and AuthenticAMD, then we should be an athlon
237 f = open("/proc/cpuinfo", "r")
238 lines = f.readlines()
239 f.close()
240 for line in lines:
241 if line.startswith("vendor") and line.find("AuthenticAMD") != -1:
242 return "athlon"
243 # i686 doesn't guarantee cmov, but we depend on it
244 elif line.startswith("flags") and line.find("cmov") == -1:
245 return "i586"
246
247 return arch
248
249def getCanonX86_64Arch(arch):
250 if arch != "x86_64":
251 return arch
252
253 vendor = None
254 f = open("/proc/cpuinfo", "r")
255 lines = f.readlines()
256 f.close()
257 for line in lines:
258 if line.startswith("vendor_id"):
259 vendor = line.split(':')[1]
260 break
261 if vendor is None:
262 return arch
263
264 if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1:
265 return "amd64"
266 if vendor.find("GenuineIntel") != -1:
267 return "ia32e"
268 return arch
269
270def getCanonArch():
271 arch = os.uname()[4]
272
273 if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"):
274 return getCanonX86Arch(arch)
275
276 if arch == "x86_64":
277 return getCanonX86_64Arch(arch)
278
279 return arch
280
281# Copy from libsatsolver:poolarch.c, with cleanup
282archPolicies = {
283 "x86_64": "x86_64:i686:i586:i486:i386",
284 "i686": "i686:i586:i486:i386",
285 "i586": "i586:i486:i386",
286 "ia64": "ia64:i686:i586:i486:i386",
287 "armv7tnhl": "armv7tnhl:armv7thl:armv7nhl:armv7hl",
288 "armv7thl": "armv7thl:armv7hl",
289 "armv7nhl": "armv7nhl:armv7hl",
290 "armv7hl": "armv7hl",
291 "armv7l": "armv7l:armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
292 "armv6l": "armv6l:armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
293 "armv5tejl": "armv5tejl:armv5tel:armv5l:armv4tl:armv4l:armv3l",
294 "armv5tel": "armv5tel:armv5l:armv4tl:armv4l:armv3l",
295 "armv5l": "armv5l:armv4tl:armv4l:armv3l",
296}
297
298# dict mapping arch -> ( multicompat, best personality, biarch personality )
299multilibArches = {
300 "x86_64": ( "athlon", "x86_64", "athlon" ),
301}
302
303# from yumUtils.py
304arches = {
305 # ia32
306 "athlon": "i686",
307 "i686": "i586",
308 "geode": "i586",
309 "i586": "i486",
310 "i486": "i386",
311 "i386": "noarch",
312
313 # amd64
314 "x86_64": "athlon",
315 "amd64": "x86_64",
316 "ia32e": "x86_64",
317
318 # arm
319 "armv7tnhl": "armv7nhl",
320 "armv7nhl": "armv7hl",
321 "armv7hl": "noarch",
322 "armv7l": "armv6l",
323 "armv6l": "armv5tejl",
324 "armv5tejl": "armv5tel",
325 "armv5tel": "noarch",
326
327 #itanium
328 "ia64": "noarch",
329}
330
331def isMultiLibArch(arch=None):
332 """returns true if arch is a multilib arch, false if not"""
333 if arch is None:
334 arch = getCanonArch()
335
336 if not arches.has_key(arch): # or we could check if it is noarch
337 return False
338
339 if multilibArches.has_key(arch):
340 return True
341
342 if multilibArches.has_key(arches[arch]):
343 return True
344
345 return False
346
347def getBaseArch():
348 myarch = getCanonArch()
349 if not arches.has_key(myarch):
350 return myarch
351
352 if isMultiLibArch(arch=myarch):
353 if multilibArches.has_key(myarch):
354 return myarch
355 else:
356 return arches[myarch]
357
358 if arches.has_key(myarch):
359 basearch = myarch
360 value = arches[basearch]
361 while value != 'noarch':
362 basearch = value
363 value = arches[basearch]
364
365 return basearch
366
367def checkRpmIntegrity(bin_rpm, package):
368 return runner.quiet([bin_rpm, "-K", "--nosignature", package])
369
370def checkSig(ts, package):
371 """ Takes a transaction set and a package, check it's sigs,
372 return 0 if they are all fine
373 return 1 if the gpg key can't be found
374 return 2 if the header is in someway damaged
375 return 3 if the key is not trusted
376 return 4 if the pkg is not gpg or pgp signed
377 """
378
379 value = 0
380 currentflags = ts.setVSFlags(0)
381 fdno = os.open(package, os.O_RDONLY)
382 try:
383 hdr = ts.hdrFromFdno(fdno)
384
385 except rpm.error, e:
386 if str(e) == "public key not availaiable":
387 value = 1
388 if str(e) == "public key not available":
389 value = 1
390 if str(e) == "public key not trusted":
391 value = 3
392 if str(e) == "error reading package header":
393 value = 2
394 else:
395 error, siginfo = getSigInfo(hdr)
396 if error == 101:
397 os.close(fdno)
398 del hdr
399 value = 4
400 else:
401 del hdr
402
403 try:
404 os.close(fdno)
405 except OSError:
406 pass
407
408 ts.setVSFlags(currentflags) # put things back like they were before
409 return value
410
411def getSigInfo(hdr):
412 """ checks signature from an hdr hand back signature information and/or
413 an error code
414 """
415
416 import locale
417 locale.setlocale(locale.LC_ALL, 'C')
418
419 string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
420 siginfo = hdr.sprintf(string)
421 if siginfo != '(none)':
422 error = 0
423 sigtype, sigdate, sigid = siginfo.split(',')
424 else:
425 error = 101
426 sigtype = 'MD5'
427 sigdate = 'None'
428 sigid = 'None'
429
430 infotuple = (sigtype, sigdate, sigid)
431 return error, infotuple
432
433def checkRepositoryEULA(name, repo):
434 """ This function is to check the EULA file if provided.
435 return True: no EULA or accepted
436 return False: user declined the EULA
437 """
438
439 import tempfile
440 import shutil
441 import urlparse
442 import urllib2 as u2
443 import httplib
444 from mic.utils.errors import CreatorError
445
446 def _check_and_download_url(u2opener, url, savepath):
447 try:
448 if u2opener:
449 f = u2opener.open(url)
450 else:
451 f = u2.urlopen(url)
452 except u2.HTTPError, httperror:
453 if httperror.code in (404, 503):
454 return None
455 else:
456 raise CreatorError(httperror)
457 except OSError, oserr:
458 if oserr.errno == 2:
459 return None
460 else:
461 raise CreatorError(oserr)
462 except IOError, oserr:
463 if hasattr(oserr, "reason") and oserr.reason.errno == 2:
464 return None
465 else:
466 raise CreatorError(oserr)
467 except u2.URLError, err:
468 raise CreatorError(err)
469 except httplib.HTTPException, e:
470 raise CreatorError(e)
471
472 # save to file
473 licf = open(savepath, "w")
474 licf.write(f.read())
475 licf.close()
476 f.close()
477
478 return savepath
479
480 def _pager_file(savepath):
481
482 if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
483 pagers = ('w3m', 'links', 'lynx', 'less', 'more')
484 else:
485 pagers = ('less', 'more')
486
487 file_showed = False
488 for pager in pagers:
489 cmd = "%s %s" % (pager, savepath)
490 try:
491 os.system(cmd)
492 except OSError:
493 continue
494 else:
495 file_showed = True
496 break
497
498 if not file_showed:
499 f = open(savepath)
500 msger.raw(f.read())
501 f.close()
502 msger.pause()
503
504 # when proxy needed, make urllib2 follow it
505 proxy = repo.proxy
506 proxy_username = repo.proxy_username
507 proxy_password = repo.proxy_password
508
509 if not proxy:
510 proxy = get_proxy_for(repo.baseurl[0])
511
512 handlers = []
513 auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
514 u2opener = None
515 if proxy:
516 if proxy_username:
517 proxy_netloc = urlparse.urlsplit(proxy).netloc
518 if proxy_password:
519 proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
520 else:
521 proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
522 else:
523 proxy_url = proxy
524
525 proxy_support = u2.ProxyHandler({'http': proxy_url,
526 'https': proxy_url,
527 'ftp': proxy_url})
528 handlers.append(proxy_support)
529
530 # download all remote files to one temp dir
531 baseurl = None
532 repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')
533
534 for url in repo.baseurl:
535 tmphandlers = handlers[:]
536
537 (scheme, host, path, parm, query, frag) = urlparse.urlparse(url.rstrip('/') + '/')
538 if scheme not in ("http", "https", "ftp", "ftps", "file"):
539 raise CreatorError("Error: invalid url %s" % url)
540
541 if '@' in host:
542 try:
543 user_pass, host = host.split('@', 1)
544 if ':' in user_pass:
545 user, password = user_pass.split(':', 1)
546 except ValueError, e:
547 raise CreatorError('Bad URL: %s' % url)
548
549 msger.verbose("adding HTTP auth: %s, XXXXXXXX" %(user))
550 auth_handler.add_password(None, host, user, password)
551 tmphandlers.append(auth_handler)
552 url = scheme + "://" + host + path + parm + query + frag
553
554 if tmphandlers:
555 u2opener = u2.build_opener(*tmphandlers)
556
557 # try to download
558 repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
559 repo_eula_path = _check_and_download_url(
560 u2opener,
561 repo_eula_url,
562 os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
563 if repo_eula_path:
564 # found
565 baseurl = url
566 break
567
568 if not baseurl:
569 shutil.rmtree(repo_lic_dir) #cleanup
570 return True
571
572 # show the license file
573 msger.info('For the software packages in this yum repo:')
574 msger.info(' %s: %s' % (name, baseurl))
575    msger.info('There is an "End User License Agreement" file that needs to be checked.')
576    msger.info('Please read the terms and conditions outlined in it and answer the following questions.')
577 msger.pause()
578
579 _pager_file(repo_eula_path)
580
581 # Asking for the "Accept/Decline"
582 if not msger.ask('Would you agree to the terms and conditions outlined in the above End User License Agreement?'):
583        msger.warning('Will not install packages from this repo.')
584        shutil.rmtree(repo_lic_dir)  # cleanup
585 return False
586
587    # try to find support_info.html for extra information
588 repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
589 repo_info_path = _check_and_download_url(
590 u2opener,
591 repo_info_url,
592 os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
593 if repo_info_path:
594        msger.info('There is an additional support information file in the repo; please read it.')
595 msger.pause()
596 _pager_file(repo_info_path)
597
598    # cleanup
599    shutil.rmtree(repo_lic_dir)
600 return True
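# A hypothetical caller of the EULA helper above; the enclosing function's
# name and the surrounding variables are assumptions, not part of this patch:
#
#   for name, repo in repos.items():
#       if not check_repo_eula(name, repo):
#           continue  # user declined the EULA; skip this repo's packages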
diff --git a/scripts/lib/mic/utils/runner.py b/scripts/lib/mic/utils/runner.py
new file mode 100644
index 0000000000..fded3c93fa
--- /dev/null
+++ b/scripts/lib/mic/utils/runner.py
@@ -0,0 +1,109 @@
1#!/usr/bin/python -tt
2#
3# Copyright (c) 2011 Intel, Inc.
4#
5# This program is free software; you can redistribute it and/or modify it
6# under the terms of the GNU General Public License as published by the Free
7# Software Foundation; version 2 of the License
8#
9# This program is distributed in the hope that it will be useful, but
10# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12# for more details.
13#
14# You should have received a copy of the GNU General Public License along
15# with this program; if not, write to the Free Software Foundation, Inc., 59
16# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18import os
19import subprocess
20
21from mic import msger
22
23def runtool(cmdln_or_args, catch=1):
24    """ Wrapper for most of the subprocess calls.
25        input:
26            cmdln_or_args: either an argument list, or a command line
27                           string (the latter is run with shell=True)
28            catch: 0, run quietly, discarding all output
29                   1, catch only STDOUT
30                   2, catch only STDERR
31                   3, catch both STDOUT and STDERR
32        return:
33            (rc, output); if catch == 0, the output is always empty
34    """
35
36    if catch not in (0, 1, 2, 3):
37        # invalid catch selection; returning None makes callers fail loudly
38        return None
39
40 if isinstance(cmdln_or_args, list):
41 cmd = cmdln_or_args[0]
42 shell = False
43 else:
44 import shlex
45 cmd = shlex.split(cmdln_or_args)[0]
46 shell = True
47
48 if catch != 3:
49 dev_null = os.open("/dev/null", os.O_WRONLY)
50
51 if catch == 0:
52 sout = dev_null
53 serr = dev_null
54 elif catch == 1:
55 sout = subprocess.PIPE
56 serr = dev_null
57 elif catch == 2:
58 sout = dev_null
59 serr = subprocess.PIPE
60 elif catch == 3:
61 sout = subprocess.PIPE
62 serr = subprocess.STDOUT
63
64 try:
65 p = subprocess.Popen(cmdln_or_args, stdout=sout,
66 stderr=serr, shell=shell)
67 (sout, serr) = p.communicate()
68 # combine stdout and stderr, filter None out
69 out = ''.join(filter(None, [sout, serr]))
70 except OSError, e:
71 if e.errno == 2:
72 # [Errno 2] No such file or directory
73            msger.error('Cannot run command: %s, missing dependency?' % cmd)
74        else:
75            raise  # re-raise other OS errors
76 finally:
77 if catch != 3:
78 os.close(dev_null)
79
80 return (p.returncode, out)
81
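# Minimal sketch of the catch semantics above; the commands are arbitrary
# examples, not taken from this patch:
#
#   rc, out = runtool(['ls', '/tmp'], catch=1)     # out holds stdout only
#   rc, out = runtool('ls /tmp | wc -l', catch=3)  # string: shell=True,
#                                                  # stdout and stderr merged
#   rc, out = runtool(['true'], catch=0)           # nothing captured: out == ''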
82def show(cmdln_or_args):
83    # log the command and all of its output via msger.verbose
84
85 rc, out = runtool(cmdln_or_args, catch=3)
86
87 if isinstance(cmdln_or_args, list):
88 cmd = ' '.join(cmdln_or_args)
89 else:
90 cmd = cmdln_or_args
91
92 msg = 'running command: "%s"' % cmd
93 if out: out = out.strip()
94 if out:
95 msg += ', with output::'
96 msg += '\n +----------------'
97 for line in out.splitlines():
98 msg += '\n | %s' % line
99 msg += '\n +----------------'
100
101 msger.verbose(msg)
102 return rc
103
104def outs(cmdln_or_args, catch=1):
105    # return the stripped output of the tool
106 return runtool(cmdln_or_args, catch)[1].strip()
107
108def quiet(cmdln_or_args):
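    # run the command quietly, returning only its exit code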
109 return runtool(cmdln_or_args, catch=0)[0]
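# Minimal usage sketch for the three wrappers; commands are arbitrary examples:
#
#   rc = show(['uname', '-r'])   # log command and output via msger.verbose
#   rel = outs(['uname', '-r'])  # stripped stdout of the command
#   rc = quiet('sync')           # discard output, keep only the exit code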