author    Richard Purdie <richard.purdie@linuxfoundation.org>  2015-01-13 14:11:36 +0000
committer Richard Purdie <richard.purdie@linuxfoundation.org>  2015-01-14 11:32:02 +0000
commit    83f8a4002ab2026696973422ee0f5733652d9527 (patch)
tree      e02abf41b4b2947a14fe2dad27e17c9273b91380 /bitbake/lib/pyinotify.py
parent    3cd31b19d34c9aa1d40fc1f59de6a1925988bb8d (diff)
download  poky-83f8a4002ab2026696973422ee0f5733652d9527.tar.gz
bitbake: bitbake: Add pyinotify to lib/
We need inotify support within bitbake and pyinotify provides the best
mechanism to add this. We have a few options:
a) Depend on pyinotify from the system
b) Add in our own copy
c) Only use pyinotify in cases like the memory resident server
For a), it would mean adding dependencies, updating documentation and
generally creating churn for users, as well as having implications for things
like the build-appliance recipe.
It turns out that glibc has provided the C functionality we need since version
2.4 (2006) and that b) only needs a single Python file; there is no binary
module required. We therefore add a copy of pyinotify 0.9.5 to the tree,
meaning we can depend on it simply and unconditionally.
c) is unattractive as we need fewer possible code paths, not more.
(Bitbake rev: d49004a4e247e3958a2f7ea9ffe5ec92794e1352)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
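For context, a minimal sketch of how code elsewhere in bitbake could use the vendored module (illustrative only, not part of this commit; the Handler class and watched path are invented for the example, and WatchManager is defined further down in the file, beyond the portion shown below):

    import pyinotify

    class Handler(pyinotify.ProcessEvent):
        # Called only for IN_CREATE events; everything else falls through
        # to process_default(), which does nothing by default.
        def process_IN_CREATE(self, event):
            print('created: %s' % event.pathname)

    wm = pyinotify.WatchManager()                 # manages inotify watches
    notifier = pyinotify.Notifier(wm, Handler())  # reads and dispatches events
    wm.add_watch('/tmp/example', pyinotify.IN_CREATE | pyinotify.IN_DELETE, rec=True)
    notifier.loop()                               # blocks, processing events until interrupted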
Diffstat (limited to 'bitbake/lib/pyinotify.py')
-rw-r--r-- | bitbake/lib/pyinotify.py | 2416
1 file changed, 2416 insertions, 0 deletions
diff --git a/bitbake/lib/pyinotify.py b/bitbake/lib/pyinotify.py
new file mode 100644
index 0000000000..2dae002118
--- /dev/null
+++ b/bitbake/lib/pyinotify.py
@@ -0,0 +1,2416 @@
1 | #!/usr/bin/env python | ||
2 | |||
3 | # pyinotify.py - python interface to inotify | ||
4 | # Copyright (c) 2005-2015 Sebastien Martini <seb@dbzteam.org> | ||
5 | # | ||
6 | # Permission is hereby granted, free of charge, to any person obtaining a copy | ||
7 | # of this software and associated documentation files (the "Software"), to deal | ||
8 | # in the Software without restriction, including without limitation the rights | ||
9 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||
10 | # copies of the Software, and to permit persons to whom the Software is | ||
11 | # furnished to do so, subject to the following conditions: | ||
12 | # | ||
13 | # The above copyright notice and this permission notice shall be included in | ||
14 | # all copies or substantial portions of the Software. | ||
15 | # | ||
16 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
19 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||
21 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||
22 | # THE SOFTWARE. | ||
23 | """ | ||
24 | pyinotify | ||
25 | |||
26 | @author: Sebastien Martini | ||
27 | @license: MIT License | ||
28 | @contact: seb@dbzteam.org | ||
29 | """ | ||
30 | |||
31 | class PyinotifyError(Exception): | ||
32 | """Indicates exceptions raised by a Pyinotify class.""" | ||
33 | pass | ||
34 | |||
35 | |||
36 | class UnsupportedPythonVersionError(PyinotifyError): | ||
37 | """ | ||
38 | Raised on unsupported Python versions. | ||
39 | """ | ||
40 | def __init__(self, version): | ||
41 | """ | ||
42 | @param version: Current Python version | ||
43 | @type version: string | ||
44 | """ | ||
45 | err = 'Python %s is unsupported, requires at least Python 2.4' | ||
46 | PyinotifyError.__init__(self, err % version) | ||
47 | |||
48 | |||
49 | # Check Python version | ||
50 | import sys | ||
51 | if sys.version_info < (2, 4): | ||
52 | raise UnsupportedPythonVersionError(sys.version) | ||
53 | |||
54 | |||
55 | # Import directives | ||
56 | import threading | ||
57 | import os | ||
58 | import select | ||
59 | import struct | ||
60 | import fcntl | ||
61 | import errno | ||
62 | import termios | ||
63 | import array | ||
64 | import logging | ||
65 | import atexit | ||
66 | from collections import deque | ||
67 | from datetime import datetime, timedelta | ||
68 | import time | ||
69 | import re | ||
70 | import asyncore | ||
71 | import subprocess | ||
72 | |||
73 | try: | ||
74 | from functools import reduce | ||
75 | except ImportError: | ||
76 | pass # Will fail on Python 2.4 which has reduce() builtin anyway. | ||
77 | |||
78 | try: | ||
79 | from glob import iglob as glob | ||
80 | except ImportError: | ||
81 | # Python 2.4 does not have glob.iglob(). | ||
82 | from glob import glob as glob | ||
83 | |||
84 | try: | ||
85 | import ctypes | ||
86 | import ctypes.util | ||
87 | except ImportError: | ||
88 | ctypes = None | ||
89 | |||
90 | try: | ||
91 | import inotify_syscalls | ||
92 | except ImportError: | ||
93 | inotify_syscalls = None | ||
94 | |||
95 | |||
96 | __author__ = "seb@dbzteam.org (Sebastien Martini)" | ||
97 | |||
98 | __version__ = "0.9.5" | ||
99 | |||
100 | __metaclass__ = type # Use new-style classes by default | ||
101 | |||
102 | |||
103 | # Compatibility mode: set to True to improve compatibility with | ||
104 | # Pyinotify 0.7.1. Do not set this variable yourself, call the | ||
105 | # function compatibility_mode() instead. | ||
106 | COMPATIBILITY_MODE = False | ||
107 | |||
108 | |||
109 | class InotifyBindingNotFoundError(PyinotifyError): | ||
110 | """ | ||
111 | Raised when no inotify binding could be found. | ||
112 | """ | ||
113 | def __init__(self): | ||
114 | err = "Couldn't find any inotify binding" | ||
115 | PyinotifyError.__init__(self, err) | ||
116 | |||
117 | |||
118 | class INotifyWrapper: | ||
119 | """ | ||
120 | Abstract class wrapping access to inotify's functions. This is an | ||
121 | internal class. | ||
122 | """ | ||
123 | @staticmethod | ||
124 | def create(): | ||
125 | # First, try to use ctypes. | ||
126 | if ctypes: | ||
127 | inotify = _CtypesLibcINotifyWrapper() | ||
128 | if inotify.init(): | ||
129 | return inotify | ||
130 | # Second, see if C extension is compiled. | ||
131 | if inotify_syscalls: | ||
132 | inotify = _INotifySyscallsWrapper() | ||
133 | if inotify.init(): | ||
134 | return inotify | ||
135 | |||
136 | def get_errno(self): | ||
137 | """ | ||
138 | Return None if no errno code is available. | ||
139 | """ | ||
140 | return self._get_errno() | ||
141 | |||
142 | def str_errno(self): | ||
143 | code = self.get_errno() | ||
144 | if code is None: | ||
145 | return 'Errno: no errno support' | ||
146 | return 'Errno=%s (%s)' % (os.strerror(code), errno.errorcode[code]) | ||
147 | |||
148 | def inotify_init(self): | ||
149 | return self._inotify_init() | ||
150 | |||
151 | def inotify_add_watch(self, fd, pathname, mask): | ||
152 | # Unicode strings must be encoded to string prior to calling this | ||
153 | # method. | ||
154 | assert isinstance(pathname, str) | ||
155 | return self._inotify_add_watch(fd, pathname, mask) | ||
156 | |||
157 | def inotify_rm_watch(self, fd, wd): | ||
158 | return self._inotify_rm_watch(fd, wd) | ||
159 | |||
160 | |||
161 | class _INotifySyscallsWrapper(INotifyWrapper): | ||
162 | def __init__(self): | ||
163 | # Stores the last errno value. | ||
164 | self._last_errno = None | ||
165 | |||
166 | def init(self): | ||
167 | assert inotify_syscalls | ||
168 | return True | ||
169 | |||
170 | def _get_errno(self): | ||
171 | return self._last_errno | ||
172 | |||
173 | def _inotify_init(self): | ||
174 | try: | ||
175 | fd = inotify_syscalls.inotify_init() | ||
176 | except IOError, err: | ||
177 | self._last_errno = err.errno | ||
178 | return -1 | ||
179 | return fd | ||
180 | |||
181 | def _inotify_add_watch(self, fd, pathname, mask): | ||
182 | try: | ||
183 | wd = inotify_syscalls.inotify_add_watch(fd, pathname, mask) | ||
184 | except IOError, err: | ||
185 | self._last_errno = err.errno | ||
186 | return -1 | ||
187 | return wd | ||
188 | |||
189 | def _inotify_rm_watch(self, fd, wd): | ||
190 | try: | ||
191 | ret = inotify_syscalls.inotify_rm_watch(fd, wd) | ||
192 | except IOError, err: | ||
193 | self._last_errno = err.errno | ||
194 | return -1 | ||
195 | return ret | ||
196 | |||
197 | |||
198 | class _CtypesLibcINotifyWrapper(INotifyWrapper): | ||
199 | def __init__(self): | ||
200 | self._libc = None | ||
201 | self._get_errno_func = None | ||
202 | |||
203 | def init(self): | ||
204 | assert ctypes | ||
205 | |||
206 | try_libc_name = 'c' | ||
207 | if sys.platform.startswith('freebsd'): | ||
208 | try_libc_name = 'inotify' | ||
209 | |||
210 | libc_name = None | ||
211 | try: | ||
212 | libc_name = ctypes.util.find_library(try_libc_name) | ||
213 | except (OSError, IOError): | ||
214 | pass # Will attempt to load it with None anyway. | ||
215 | |||
216 | if sys.version_info >= (2, 6): | ||
217 | self._libc = ctypes.CDLL(libc_name, use_errno=True) | ||
218 | self._get_errno_func = ctypes.get_errno | ||
219 | else: | ||
220 | self._libc = ctypes.CDLL(libc_name) | ||
221 | try: | ||
222 | location = self._libc.__errno_location | ||
223 | location.restype = ctypes.POINTER(ctypes.c_int) | ||
224 | self._get_errno_func = lambda: location().contents.value | ||
225 | except AttributeError: | ||
226 | pass | ||
227 | |||
228 | # Finally, check that libc has the needed inotify bindings. | ||
229 | if (not hasattr(self._libc, 'inotify_init') or | ||
230 | not hasattr(self._libc, 'inotify_add_watch') or | ||
231 | not hasattr(self._libc, 'inotify_rm_watch')): | ||
232 | return False | ||
233 | |||
234 | self._libc.inotify_init.argtypes = [] | ||
235 | self._libc.inotify_init.restype = ctypes.c_int | ||
236 | self._libc.inotify_add_watch.argtypes = [ctypes.c_int, ctypes.c_char_p, | ||
237 | ctypes.c_uint32] | ||
238 | self._libc.inotify_add_watch.restype = ctypes.c_int | ||
239 | self._libc.inotify_rm_watch.argtypes = [ctypes.c_int, ctypes.c_int] | ||
240 | self._libc.inotify_rm_watch.restype = ctypes.c_int | ||
241 | return True | ||
242 | |||
243 | def _get_errno(self): | ||
244 | if self._get_errno_func is not None: | ||
245 | return self._get_errno_func() | ||
246 | return None | ||
247 | |||
248 | def _inotify_init(self): | ||
249 | assert self._libc is not None | ||
250 | return self._libc.inotify_init() | ||
251 | |||
252 | def _inotify_add_watch(self, fd, pathname, mask): | ||
253 | assert self._libc is not None | ||
254 | pathname = ctypes.create_string_buffer(pathname) | ||
255 | return self._libc.inotify_add_watch(fd, pathname, mask) | ||
256 | |||
257 | def _inotify_rm_watch(self, fd, wd): | ||
258 | assert self._libc is not None | ||
259 | return self._libc.inotify_rm_watch(fd, wd) | ||
260 | |||
261 | def _sysctl(self, *args): | ||
262 | assert self._libc is not None | ||
263 | return self._libc.sysctl(*args) | ||
264 | |||
265 | |||
266 | # Logging | ||
267 | def logger_init(): | ||
268 | """Initialize logger instance.""" | ||
269 | log = logging.getLogger("pyinotify") | ||
270 | console_handler = logging.StreamHandler() | ||
271 | console_handler.setFormatter( | ||
272 | logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s")) | ||
273 | log.addHandler(console_handler) | ||
274 | log.setLevel(20) | ||
275 | return log | ||
276 | |||
277 | log = logger_init() | ||
278 | |||
279 | |||
280 | # inotify's variables | ||
281 | class SysCtlINotify: | ||
282 | """ | ||
283 | Access (read, write) inotify's variables through sysctl. Usually it | ||
284 | requires administrator rights to update them. | ||
285 | |||
286 | Examples: | ||
287 | - Read max_queued_events attribute: myvar = max_queued_events.value | ||
288 | - Update max_queued_events attribute: max_queued_events.value = 42 | ||
289 | """ | ||
290 | |||
291 | inotify_attrs = {'max_user_instances': 1, | ||
292 | 'max_user_watches': 2, | ||
293 | 'max_queued_events': 3} | ||
294 | |||
295 | def __init__(self, attrname, inotify_wrapper): | ||
296 | # FIXME: right now only supporting ctypes | ||
297 | assert ctypes | ||
298 | self._attrname = attrname | ||
299 | self._inotify_wrapper = inotify_wrapper | ||
300 | sino = ctypes.c_int * 3 | ||
301 | self._attr = sino(5, 20, SysCtlINotify.inotify_attrs[attrname]) | ||
302 | |||
303 | @staticmethod | ||
304 | def create(attrname): | ||
305 | """ | ||
306 | Factory method instantiating and returning the right wrapper. | ||
307 | """ | ||
308 | # FIXME: right now only supporting ctypes | ||
309 | if ctypes is None: | ||
310 | return None | ||
311 | inotify_wrapper = _CtypesLibcINotifyWrapper() | ||
312 | if not inotify_wrapper.init(): | ||
313 | return None | ||
314 | return SysCtlINotify(attrname, inotify_wrapper) | ||
315 | |||
316 | def get_val(self): | ||
317 | """ | ||
318 | Gets attribute's value. Raises OSError if the operation failed. | ||
319 | |||
320 | @return: stored value. | ||
321 | @rtype: int | ||
322 | """ | ||
323 | oldv = ctypes.c_int(0) | ||
324 | size = ctypes.c_int(ctypes.sizeof(oldv)) | ||
325 | sysctl = self._inotify_wrapper._sysctl | ||
326 | res = sysctl(self._attr, 3, | ||
327 | ctypes.c_voidp(ctypes.addressof(oldv)), | ||
328 | ctypes.addressof(size), | ||
329 | None, 0) | ||
330 | if res == -1: | ||
331 | raise OSError(self._inotify_wrapper.get_errno(), | ||
332 | self._inotify_wrapper.str_errno()) | ||
333 | return oldv.value | ||
334 | |||
335 | def set_val(self, nval): | ||
336 | """ | ||
337 | Sets new attribute's value. Raises OSError if the operation failed. | ||
338 | |||
339 | @param nval: replaces current value by nval. | ||
340 | @type nval: int | ||
341 | """ | ||
342 | oldv = ctypes.c_int(0) | ||
343 | sizeo = ctypes.c_int(ctypes.sizeof(oldv)) | ||
344 | newv = ctypes.c_int(nval) | ||
345 | sizen = ctypes.c_int(ctypes.sizeof(newv)) | ||
346 | sysctl = self._inotify_wrapper._sysctl | ||
347 | res = sysctl(self._attr, 3, | ||
348 | ctypes.c_voidp(ctypes.addressof(oldv)), | ||
349 | ctypes.addressof(sizeo), | ||
350 | ctypes.c_voidp(ctypes.addressof(newv)), | ||
351 | sizen) | ||
352 | if res == -1: | ||
353 | raise OSError(self._inotify_wrapper.get_errno(), | ||
354 | self._inotify_wrapper.str_errno()) | ||
355 | |||
356 | value = property(get_val, set_val) | ||
357 | |||
358 | def __repr__(self): | ||
359 | return '<%s=%d>' % (self._attrname, self.get_val()) | ||
360 | |||
361 | |||
362 | # Inotify's variables | ||
363 | # | ||
364 | # FIXME: currently these variables are only accessible when ctypes is used, | ||
365 | # otherwise they are set to None. | ||
366 | # | ||
367 | # read: myvar = max_queued_events.value | ||
368 | # update: max_queued_events.value = 42 | ||
369 | # | ||
370 | for attrname in ('max_queued_events', 'max_user_instances', 'max_user_watches'): | ||
371 | globals()[attrname] = SysCtlINotify.create(attrname) | ||
372 | |||
373 | |||
374 | class EventsCodes: | ||
375 | """ | ||
376 | Set of codes corresponding to each kind of events. | ||
377 | Some of these flags are used to communicate with inotify, whereas | ||
378 | the others are sent to userspace by inotify notifying some events. | ||
379 | |||
380 | @cvar IN_ACCESS: File was accessed. | ||
381 | @type IN_ACCESS: int | ||
382 | @cvar IN_MODIFY: File was modified. | ||
383 | @type IN_MODIFY: int | ||
384 | @cvar IN_ATTRIB: Metadata changed. | ||
385 | @type IN_ATTRIB: int | ||
386 | @cvar IN_CLOSE_WRITE: Writable file was closed. | ||
387 | @type IN_CLOSE_WRITE: int | ||
388 | @cvar IN_CLOSE_NOWRITE: Unwritable file closed. | ||
389 | @type IN_CLOSE_NOWRITE: int | ||
390 | @cvar IN_OPEN: File was opened. | ||
391 | @type IN_OPEN: int | ||
392 | @cvar IN_MOVED_FROM: File was moved from X. | ||
393 | @type IN_MOVED_FROM: int | ||
394 | @cvar IN_MOVED_TO: File was moved to Y. | ||
395 | @type IN_MOVED_TO: int | ||
396 | @cvar IN_CREATE: Subfile was created. | ||
397 | @type IN_CREATE: int | ||
398 | @cvar IN_DELETE: Subfile was deleted. | ||
399 | @type IN_DELETE: int | ||
400 | @cvar IN_DELETE_SELF: Self (watched item itself) was deleted. | ||
401 | @type IN_DELETE_SELF: int | ||
402 | @cvar IN_MOVE_SELF: Self (watched item itself) was moved. | ||
403 | @type IN_MOVE_SELF: int | ||
404 | @cvar IN_UNMOUNT: Backing fs was unmounted. | ||
405 | @type IN_UNMOUNT: int | ||
406 | @cvar IN_Q_OVERFLOW: Event queue overflowed. | ||
407 | @type IN_Q_OVERFLOW: int | ||
408 | @cvar IN_IGNORED: File was ignored. | ||
409 | @type IN_IGNORED: int | ||
410 | @cvar IN_ONLYDIR: only watch the path if it is a directory (new | ||
411 | in kernel 2.6.15). | ||
412 | @type IN_ONLYDIR: int | ||
413 | @cvar IN_DONT_FOLLOW: don't follow a symlink (new in kernel 2.6.15). | ||
414 | Combined with IN_ONLYDIR we can make sure that we | ||
415 | don't watch the target of symlinks. | ||
416 | @type IN_DONT_FOLLOW: int | ||
417 | @cvar IN_EXCL_UNLINK: Events are not generated for children after they | ||
418 | have been unlinked from the watched directory. | ||
419 | (new in kernel 2.6.36). | ||
420 | @type IN_EXCL_UNLINK: int | ||
421 | @cvar IN_MASK_ADD: add to the mask of an already existing watch (new | ||
422 | in kernel 2.6.14). | ||
423 | @type IN_MASK_ADD: int | ||
424 | @cvar IN_ISDIR: Event occurred against dir. | ||
425 | @type IN_ISDIR: int | ||
426 | @cvar IN_ONESHOT: Only send event once. | ||
427 | @type IN_ONESHOT: int | ||
428 | @cvar ALL_EVENTS: Alias for considering all of the events. | ||
429 | @type ALL_EVENTS: int | ||
430 | """ | ||
431 | |||
432 | # The idea here is 'configuration-as-code' - this way, we get our nice class | ||
433 | # constants, but we also get nice human-friendly text mappings to do lookups | ||
434 | # against as well, for free: | ||
435 | FLAG_COLLECTIONS = {'OP_FLAGS': { | ||
436 | 'IN_ACCESS' : 0x00000001, # File was accessed | ||
437 | 'IN_MODIFY' : 0x00000002, # File was modified | ||
438 | 'IN_ATTRIB' : 0x00000004, # Metadata changed | ||
439 | 'IN_CLOSE_WRITE' : 0x00000008, # Writable file was closed | ||
440 | 'IN_CLOSE_NOWRITE' : 0x00000010, # Unwritable file closed | ||
441 | 'IN_OPEN' : 0x00000020, # File was opened | ||
442 | 'IN_MOVED_FROM' : 0x00000040, # File was moved from X | ||
443 | 'IN_MOVED_TO' : 0x00000080, # File was moved to Y | ||
444 | 'IN_CREATE' : 0x00000100, # Subfile was created | ||
445 | 'IN_DELETE' : 0x00000200, # Subfile was deleted | ||
446 | 'IN_DELETE_SELF' : 0x00000400, # Self (watched item itself) | ||
447 | # was deleted | ||
448 | 'IN_MOVE_SELF' : 0x00000800, # Self (watched item itself) was moved | ||
449 | }, | ||
450 | 'EVENT_FLAGS': { | ||
451 | 'IN_UNMOUNT' : 0x00002000, # Backing fs was unmounted | ||
452 | 'IN_Q_OVERFLOW' : 0x00004000, # Event queue overflowed | ||
453 | 'IN_IGNORED' : 0x00008000, # File was ignored | ||
454 | }, | ||
455 | 'SPECIAL_FLAGS': { | ||
456 | 'IN_ONLYDIR' : 0x01000000, # only watch the path if it is a | ||
457 | # directory | ||
458 | 'IN_DONT_FOLLOW' : 0x02000000, # don't follow a symlink | ||
459 | 'IN_EXCL_UNLINK' : 0x04000000, # exclude events on unlinked objects | ||
460 | 'IN_MASK_ADD' : 0x20000000, # add to the mask of an already | ||
461 | # existing watch | ||
462 | 'IN_ISDIR' : 0x40000000, # event occurred against dir | ||
463 | 'IN_ONESHOT' : 0x80000000, # only send event once | ||
464 | }, | ||
465 | } | ||
466 | |||
467 | def maskname(mask): | ||
468 | """ | ||
469 | Returns the event name associated to mask. IN_ISDIR is appended to | ||
470 | the result when appropriate. Note: only one event is returned, because | ||
471 | only one event can be raised at a given time. | ||
472 | |||
473 | @param mask: mask. | ||
474 | @type mask: int | ||
475 | @return: event name. | ||
476 | @rtype: str | ||
477 | """ | ||
478 | ms = mask | ||
479 | name = '%s' | ||
480 | if mask & IN_ISDIR: | ||
481 | ms = mask - IN_ISDIR | ||
482 | name = '%s|IN_ISDIR' | ||
483 | return name % EventsCodes.ALL_VALUES[ms] | ||
484 | |||
485 | maskname = staticmethod(maskname) | ||
486 | |||
487 | |||
488 | # So let's now turn the configuration into code | ||
489 | EventsCodes.ALL_FLAGS = {} | ||
490 | EventsCodes.ALL_VALUES = {} | ||
491 | for flagc, valc in EventsCodes.FLAG_COLLECTIONS.items(): | ||
492 | # Make the collections' members directly accessible through the | ||
493 | # class dictionary | ||
494 | setattr(EventsCodes, flagc, valc) | ||
495 | |||
496 | # Collect all the flags under a common umbrella | ||
497 | EventsCodes.ALL_FLAGS.update(valc) | ||
498 | |||
499 | # Make the individual masks accessible as 'constants' at globals() scope | ||
500 | # and masknames accessible by values. | ||
501 | for name, val in valc.items(): | ||
502 | globals()[name] = val | ||
503 | EventsCodes.ALL_VALUES[val] = name | ||
504 | |||
505 | |||
506 | # all 'normal' events | ||
507 | ALL_EVENTS = reduce(lambda x, y: x | y, EventsCodes.OP_FLAGS.values()) | ||
508 | EventsCodes.ALL_FLAGS['ALL_EVENTS'] = ALL_EVENTS | ||
509 | EventsCodes.ALL_VALUES[ALL_EVENTS] = 'ALL_EVENTS' | ||
510 | |||
511 | |||
512 | class _Event: | ||
513 | """ | ||
514 | Event structure, representing events raised by the system. This | ||
515 | is the base class and should be subclassed. | ||
516 | |||
517 | """ | ||
518 | def __init__(self, dict_): | ||
519 | """ | ||
520 | Attach attributes (contained in dict_) to self. | ||
521 | |||
522 | @param dict_: Set of attributes. | ||
523 | @type dict_: dictionary | ||
524 | """ | ||
525 | for tpl in dict_.items(): | ||
526 | setattr(self, *tpl) | ||
527 | |||
528 | def __repr__(self): | ||
529 | """ | ||
530 | @return: Generic event string representation. | ||
531 | @rtype: str | ||
532 | """ | ||
533 | s = '' | ||
534 | for attr, value in sorted(self.__dict__.items(), key=lambda x: x[0]): | ||
535 | if attr.startswith('_'): | ||
536 | continue | ||
537 | if attr == 'mask': | ||
538 | value = hex(getattr(self, attr)) | ||
539 | elif isinstance(value, basestring) and not value: | ||
540 | value = "''" | ||
541 | s += ' %s%s%s' % (output_format.field_name(attr), | ||
542 | output_format.punctuation('='), | ||
543 | output_format.field_value(value)) | ||
544 | |||
545 | s = '%s%s%s %s' % (output_format.punctuation('<'), | ||
546 | output_format.class_name(self.__class__.__name__), | ||
547 | s, | ||
548 | output_format.punctuation('>')) | ||
549 | return s | ||
550 | |||
551 | def __str__(self): | ||
552 | return repr(self) | ||
553 | |||
554 | |||
555 | class _RawEvent(_Event): | ||
556 | """ | ||
557 | Raw event; it contains only the information provided by the system. | ||
558 | It doesn't infer anything. | ||
559 | """ | ||
560 | def __init__(self, wd, mask, cookie, name): | ||
561 | """ | ||
562 | @param wd: Watch Descriptor. | ||
563 | @type wd: int | ||
564 | @param mask: Bitmask of events. | ||
565 | @type mask: int | ||
566 | @param cookie: Cookie. | ||
567 | @type cookie: int | ||
568 | @param name: Basename of the file or directory against which the | ||
569 | event was raised in case where the watched directory | ||
570 | is the parent directory. None if the event was raised | ||
571 | on the watched item itself. | ||
572 | @type name: string or None | ||
573 | """ | ||
574 | # Use this variable to cache the result of str(self), this object | ||
575 | # is immutable. | ||
576 | self._str = None | ||
577 | # name: remove trailing '\0' | ||
578 | d = {'wd': wd, | ||
579 | 'mask': mask, | ||
580 | 'cookie': cookie, | ||
581 | 'name': name.rstrip('\0')} | ||
582 | _Event.__init__(self, d) | ||
583 | log.debug(str(self)) | ||
584 | |||
585 | def __str__(self): | ||
586 | if self._str is None: | ||
587 | self._str = _Event.__str__(self) | ||
588 | return self._str | ||
589 | |||
590 | |||
591 | class Event(_Event): | ||
592 | """ | ||
593 | This class contains all the useful information about the observed | ||
594 | event. However, the presence of each field is not guaranteed and | ||
595 | depends on the type of event. In effect, some fields are irrelevant | ||
596 | for some kinds of events (for example 'cookie' is meaningless for | ||
597 | IN_CREATE whereas it is mandatory for IN_MOVED_TO). | ||
598 | |||
599 | The possible fields are: | ||
600 | - wd (int): Watch Descriptor. | ||
601 | - mask (int): Mask. | ||
602 | - maskname (str): Readable event name. | ||
603 | - path (str): path of the file or directory being watched. | ||
604 | - name (str): Basename of the file or directory against which the | ||
605 | event was raised in case where the watched directory | ||
606 | is the parent directory. None if the event was raised | ||
607 | on the watched item itself. This field is always provided | ||
608 | even if the string is ''. | ||
609 | - pathname (str): Concatenation of 'path' and 'name'. | ||
610 | - src_pathname (str): Only present for IN_MOVED_TO events and only in | ||
611 | the case where IN_MOVED_FROM events are watched too. Holds the | ||
612 | source pathname from where pathname was moved from. | ||
613 | - cookie (int): Cookie. | ||
614 | - dir (bool): True if the event was raised against a directory. | ||
615 | |||
616 | """ | ||
617 | def __init__(self, raw): | ||
618 | """ | ||
619 | Concretely, this is the raw event plus inferred infos. | ||
620 | """ | ||
621 | _Event.__init__(self, raw) | ||
622 | self.maskname = EventsCodes.maskname(self.mask) | ||
623 | if COMPATIBILITY_MODE: | ||
624 | self.event_name = self.maskname | ||
625 | try: | ||
626 | if self.name: | ||
627 | self.pathname = os.path.abspath(os.path.join(self.path, | ||
628 | self.name)) | ||
629 | else: | ||
630 | self.pathname = os.path.abspath(self.path) | ||
631 | except AttributeError, err: | ||
632 | # Usually it is not an error: some events are perfectly valid | ||
633 | # despite the lack of these attributes. | ||
634 | log.debug(err) | ||
635 | |||
636 | |||
637 | class ProcessEventError(PyinotifyError): | ||
638 | """ | ||
639 | ProcessEventError Exception. Raised on ProcessEvent error. | ||
640 | """ | ||
641 | def __init__(self, err): | ||
642 | """ | ||
643 | @param err: Exception error description. | ||
644 | @type err: string | ||
645 | """ | ||
646 | PyinotifyError.__init__(self, err) | ||
647 | |||
648 | |||
649 | class _ProcessEvent: | ||
650 | """ | ||
651 | Abstract processing event class. | ||
652 | """ | ||
653 | def __call__(self, event): | ||
654 | """ | ||
655 | To behave like a functor the object must be callable. | ||
656 | This method is a dispatch method. Its lookup order is: | ||
657 | 1. process_MASKNAME method | ||
658 | 2. process_FAMILY_NAME method | ||
659 | 3. otherwise calls process_default | ||
660 | |||
661 | @param event: Event to be processed. | ||
662 | @type event: Event object | ||
663 | @return: By convention when used from the ProcessEvent class: | ||
664 | - Returning False or None (default value) means keep on | ||
665 | executing next chained functors (see chain.py example). | ||
666 | - Returning True instead means do not execute next | ||
667 | processing functions. | ||
668 | @rtype: bool | ||
669 | @raise ProcessEventError: Event object undispatchable, | ||
670 | unknown event. | ||
671 | """ | ||
672 | stripped_mask = event.mask - (event.mask & IN_ISDIR) | ||
673 | maskname = EventsCodes.ALL_VALUES.get(stripped_mask) | ||
674 | if maskname is None: | ||
675 | raise ProcessEventError("Unknown mask 0x%08x" % stripped_mask) | ||
676 | |||
677 | # 1- look for process_MASKNAME | ||
678 | meth = getattr(self, 'process_' + maskname, None) | ||
679 | if meth is not None: | ||
680 | return meth(event) | ||
681 | # 2- look for process_FAMILY_NAME | ||
682 | meth = getattr(self, 'process_IN_' + maskname.split('_')[1], None) | ||
683 | if meth is not None: | ||
684 | return meth(event) | ||
685 | # 3- default call method process_default | ||
686 | return self.process_default(event) | ||
687 | |||
688 | def __repr__(self): | ||
689 | return '<%s>' % self.__class__.__name__ | ||
690 | |||
691 | |||
692 | class _SysProcessEvent(_ProcessEvent): | ||
693 | """ | ||
694 | There are three kinds of processing according to each event: | ||
695 |||
696 | 1. special handling (deletion from internal container, bug, ...). | ||
697 | 2. default treatment, which is applied to the majority of events. | ||
698 | 3. IN_ISDIR is never sent alone; it is piggybacked with a standard | ||
699 | event and is not processed like the other events: instead, its | ||
700 | value is captured and appropriately aggregated into the destination event. | ||
701 | """ | ||
702 | def __init__(self, wm, notifier): | ||
703 | """ | ||
704 | |||
705 | @param wm: Watch Manager. | ||
706 | @type wm: WatchManager instance | ||
707 | @param notifier: Notifier. | ||
708 | @type notifier: Notifier instance | ||
709 | """ | ||
710 | self._watch_manager = wm # watch manager | ||
711 | self._notifier = notifier # notifier | ||
712 | self._mv_cookie = {} # {cookie(int): (src_path(str), date), ...} | ||
713 | self._mv = {} # {src_path(str): (dst_path(str), date), ...} | ||
714 | |||
715 | def cleanup(self): | ||
716 | """ | ||
717 | Cleanup (delete) old (>1mn) records contained in self._mv_cookie | ||
718 | and self._mv. | ||
719 | """ | ||
720 | date_cur_ = datetime.now() | ||
721 | for seq in [self._mv_cookie, self._mv]: | ||
722 | for k in seq.keys(): | ||
723 | if (date_cur_ - seq[k][1]) > timedelta(minutes=1): | ||
724 | log.debug('Cleanup: deleting entry %s', seq[k][0]) | ||
725 | del seq[k] | ||
726 | |||
727 | def process_IN_CREATE(self, raw_event): | ||
728 | """ | ||
729 | If the event affects a directory and the auto_add flag of the | ||
730 | targeted watch is set to True, a new watch is added on this | ||
731 | new directory, with the same attribute values as those of | ||
732 | this watch. | ||
733 | """ | ||
734 | if raw_event.mask & IN_ISDIR: | ||
735 | watch_ = self._watch_manager.get_watch(raw_event.wd) | ||
736 | created_dir = os.path.join(watch_.path, raw_event.name) | ||
737 | if watch_.auto_add and not watch_.exclude_filter(created_dir): | ||
738 | addw = self._watch_manager.add_watch | ||
739 | # The newly monitored directory inherits attributes from its | ||
740 | # parent directory. | ||
741 | addw_ret = addw(created_dir, watch_.mask, | ||
742 | proc_fun=watch_.proc_fun, | ||
743 | rec=False, auto_add=watch_.auto_add, | ||
744 | exclude_filter=watch_.exclude_filter) | ||
745 | |||
746 | # Trick to handle mkdir -p /d1/d2/t3 where d1 is watched and | ||
747 | # d2 and t3 (directory or file) are created. | ||
748 | # Since the directory d2 is new, then everything inside it must | ||
749 | # also be new. | ||
750 | created_dir_wd = addw_ret.get(created_dir) | ||
751 | if ((created_dir_wd is not None) and (created_dir_wd > 0) and | ||
752 | os.path.isdir(created_dir)): | ||
753 | try: | ||
754 | for name in os.listdir(created_dir): | ||
755 | inner = os.path.join(created_dir, name) | ||
756 | if self._watch_manager.get_wd(inner) is not None: | ||
757 | continue | ||
758 | # Generate (simulate) creation events for sub- | ||
759 | # directories and files. | ||
760 | if os.path.isfile(inner): | ||
761 | # symlinks are handled as files. | ||
762 | flags = IN_CREATE | ||
763 | elif os.path.isdir(inner): | ||
764 | flags = IN_CREATE | IN_ISDIR | ||
765 | else: | ||
766 | # This path should not be taken. | ||
767 | continue | ||
768 | rawevent = _RawEvent(created_dir_wd, flags, 0, name) | ||
769 | self._notifier.append_event(rawevent) | ||
770 | except OSError, err: | ||
771 | msg = "process_IN_CREATE, invalid directory %s: %s" | ||
772 | log.debug(msg % (created_dir, str(err))) | ||
773 | return self.process_default(raw_event) | ||
774 | |||
775 | def process_IN_MOVED_FROM(self, raw_event): | ||
776 | """ | ||
777 | Map the cookie with the source path (+ date for cleaning). | ||
778 | """ | ||
779 | watch_ = self._watch_manager.get_watch(raw_event.wd) | ||
780 | path_ = watch_.path | ||
781 | src_path = os.path.normpath(os.path.join(path_, raw_event.name)) | ||
782 | self._mv_cookie[raw_event.cookie] = (src_path, datetime.now()) | ||
783 | return self.process_default(raw_event, {'cookie': raw_event.cookie}) | ||
784 | |||
785 | def process_IN_MOVED_TO(self, raw_event): | ||
786 | """ | ||
787 | Map the source path with the destination path (+ date for | ||
788 | cleaning). | ||
789 | """ | ||
790 | watch_ = self._watch_manager.get_watch(raw_event.wd) | ||
791 | path_ = watch_.path | ||
792 | dst_path = os.path.normpath(os.path.join(path_, raw_event.name)) | ||
793 | mv_ = self._mv_cookie.get(raw_event.cookie) | ||
794 | to_append = {'cookie': raw_event.cookie} | ||
795 | if mv_ is not None: | ||
796 | self._mv[mv_[0]] = (dst_path, datetime.now()) | ||
797 | # Let's assume that IN_MOVED_FROM event is always queued before | ||
798 | # that its associated (they share a common cookie) IN_MOVED_TO | ||
799 | # event is queued itself. It is then possible in that scenario | ||
800 | # to provide as additional information to the IN_MOVED_TO event | ||
801 | # the original pathname of the moved file/directory. | ||
802 | to_append['src_pathname'] = mv_[0] | ||
803 | elif (raw_event.mask & IN_ISDIR and watch_.auto_add and | ||
804 | not watch_.exclude_filter(dst_path)): | ||
805 | # We got a directory that's "moved in" from an unknown source and | ||
806 | # auto_add is enabled. Manually add watches to the inner subtrees. | ||
807 | # The newly monitored directory inherits attributes from its | ||
808 | # parent directory. | ||
809 | self._watch_manager.add_watch(dst_path, watch_.mask, | ||
810 | proc_fun=watch_.proc_fun, | ||
811 | rec=True, auto_add=True, | ||
812 | exclude_filter=watch_.exclude_filter) | ||
813 | return self.process_default(raw_event, to_append) | ||
814 | |||
815 | def process_IN_MOVE_SELF(self, raw_event): | ||
816 | """ | ||
817 | STATUS: the following bug has been fixed in recent kernels (FIXME: | ||
818 | which version?). Now it raises IN_DELETE_SELF instead. | ||
819 |||
820 | Old kernels were buggy: this event was raised when the watched item | ||
821 | was moved, so we had to update its path, but under some circumstances | ||
822 | that was impossible: if its parent directory and its destination | ||
823 | directory weren't watched. The kernel (see include/linux/fsnotify.h) | ||
824 | doesn't give us enough information, such as the destination path of | ||
825 | moved items. | ||
826 | """ | ||
827 | watch_ = self._watch_manager.get_watch(raw_event.wd) | ||
828 | src_path = watch_.path | ||
829 | mv_ = self._mv.get(src_path) | ||
830 | if mv_: | ||
831 | dest_path = mv_[0] | ||
832 | watch_.path = dest_path | ||
833 | # add the separator to the source path to avoid overlapping | ||
834 | # path issue when testing with startswith() | ||
835 | src_path += os.path.sep | ||
836 | src_path_len = len(src_path) | ||
837 | # The next loop renames all watches with src_path as base path. | ||
838 | # It seems that IN_MOVE_SELF does not provide IN_ISDIR information | ||
839 | # therefore the next loop is iterated even if raw_event is a file. | ||
840 | for w in self._watch_manager.watches.values(): | ||
841 | if w.path.startswith(src_path): | ||
842 | # Note that dest_path is a normalized path. | ||
843 | w.path = os.path.join(dest_path, w.path[src_path_len:]) | ||
844 | else: | ||
845 | log.error("The pathname '%s' of this watch %s has probably changed " | ||
846 | "and couldn't be updated, so it cannot be trusted " | ||
847 | "anymore. To fix this error move directories/files only " | ||
848 | "between watched parent directories, in this case e.g. " | ||
849 | "put a watch on '%s'.", | ||
850 | watch_.path, watch_, | ||
851 | os.path.normpath(os.path.join(watch_.path, | ||
852 | os.path.pardir))) | ||
853 | if not watch_.path.endswith('-unknown-path'): | ||
854 | watch_.path += '-unknown-path' | ||
855 | return self.process_default(raw_event) | ||
856 | |||
857 | def process_IN_Q_OVERFLOW(self, raw_event): | ||
858 | """ | ||
859 | Only signal an overflow, most of the common flags are irrelevant | ||
860 | for this event (path, wd, name). | ||
861 | """ | ||
862 | return Event({'mask': raw_event.mask}) | ||
863 | |||
864 | def process_IN_IGNORED(self, raw_event): | ||
865 | """ | ||
866 | The watch descriptor raised by this event is now ignored (forever); | ||
867 | it can be safely deleted from the watch manager dictionary. | ||
868 | After this event we can be sure that neither the event queue nor | ||
869 | the system will raise an event associated to this wd again. | ||
870 | """ | ||
871 | event_ = self.process_default(raw_event) | ||
872 | self._watch_manager.del_watch(raw_event.wd) | ||
873 | return event_ | ||
874 | |||
875 | def process_default(self, raw_event, to_append=None): | ||
876 | """ | ||
877 | Common handling for the following events: | ||
878 | |||
879 | IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_WRITE, IN_CLOSE_NOWRITE, | ||
880 | IN_OPEN, IN_DELETE, IN_DELETE_SELF, IN_UNMOUNT. | ||
881 | """ | ||
882 | watch_ = self._watch_manager.get_watch(raw_event.wd) | ||
883 | if raw_event.mask & (IN_DELETE_SELF | IN_MOVE_SELF): | ||
884 | # Unfortunately this information is not provided by the kernel | ||
885 | dir_ = watch_.dir | ||
886 | else: | ||
887 | dir_ = bool(raw_event.mask & IN_ISDIR) | ||
888 | dict_ = {'wd': raw_event.wd, | ||
889 | 'mask': raw_event.mask, | ||
890 | 'path': watch_.path, | ||
891 | 'name': raw_event.name, | ||
892 | 'dir': dir_} | ||
893 | if COMPATIBILITY_MODE: | ||
894 | dict_['is_dir'] = dir_ | ||
895 | if to_append is not None: | ||
896 | dict_.update(to_append) | ||
897 | return Event(dict_) | ||
898 | |||
899 | |||
900 | class ProcessEvent(_ProcessEvent): | ||
901 | """ | ||
902 | Processes event objects; can be specialized via subclassing, thus its | ||
903 | behavior can be overridden: | ||
904 |||
905 | Note: you should not override __init__ in your subclass; instead define | ||
906 | a my_init() method, which will be called automatically from the | ||
907 | constructor of this class with its optional parameters. | ||
908 | |||
909 | 1. Provide specialized individual methods, e.g. process_IN_DELETE for | ||
910 | processing a precise type of event (e.g. IN_DELETE in this case). | ||
911 | 2. Or/and provide methods for processing events by 'family', e.g. | ||
912 | process_IN_CLOSE method will process both IN_CLOSE_WRITE and | ||
913 | IN_CLOSE_NOWRITE events (if process_IN_CLOSE_WRITE and | ||
914 | process_IN_CLOSE_NOWRITE aren't defined though). | ||
915 | 3. Or/and override process_default for catching and processing all | ||
916 | the remaining types of events. | ||
917 | """ | ||
918 | pevent = None | ||
919 | |||
920 | def __init__(self, pevent=None, **kargs): | ||
921 | """ | ||
922 | Enable chaining of ProcessEvent instances. | ||
923 | |||
924 | @param pevent: Optional callable object, will be called on event | ||
925 | processing (before self). | ||
926 | @type pevent: callable | ||
927 | @param kargs: This constructor is implemented as a template method | ||
928 | delegating its optional keyword arguments to the | ||
929 | method my_init(). | ||
930 | @type kargs: dict | ||
931 | """ | ||
932 | self.pevent = pevent | ||
933 | self.my_init(**kargs) | ||
934 | |||
935 | def my_init(self, **kargs): | ||
936 | """ | ||
937 | This method is called from ProcessEvent.__init__(). This method is | ||
938 | empty here and must be redefined to be useful. In effect, if you | ||
939 | need to specifically initialize your subclass' instance then you | ||
940 | just have to override this method in your subclass. Then all the | ||
941 | keyword arguments passed to ProcessEvent.__init__() will be | ||
942 | transmitted as parameters to this method. Beware you MUST pass | ||
943 | keyword arguments though. | ||
944 | |||
945 | @param kargs: optional delegated arguments from __init__(). | ||
946 | @type kargs: dict | ||
947 | """ | ||
948 | pass | ||
949 | |||
950 | def __call__(self, event): | ||
951 | stop_chaining = False | ||
952 | if self.pevent is not None: | ||
953 | # By default methods return None, so we set as a guideline | ||
954 | # that methods asking to stop chaining must explicitly | ||
955 | # return non-None or non-False values; otherwise the default | ||
956 | # behavior will be to accept the chained call to the corresponding | ||
957 | # local method. | ||
958 | stop_chaining = self.pevent(event) | ||
959 | if not stop_chaining: | ||
960 | return _ProcessEvent.__call__(self, event) | ||
961 | |||
962 | def nested_pevent(self): | ||
963 | return self.pevent | ||
964 | |||
965 | def process_IN_Q_OVERFLOW(self, event): | ||
966 | """ | ||
967 | By default this method only reports warning messages; you can override | ||
968 | it by subclassing ProcessEvent and implementing your own | ||
969 | process_IN_Q_OVERFLOW method. The actions you can take on receiving this | ||
970 | event are either to update the variable max_queued_events in order to | ||
971 | handle more simultaneous events, or to modify your code in order to | ||
972 | filter events better and so reduce the number of raised events. | ||
973 | Because this method is defined, IN_Q_OVERFLOW will never get | ||
974 | transmitted as an argument to process_default calls. | ||
975 | |||
976 | @param event: IN_Q_OVERFLOW event. | ||
977 | @type event: dict | ||
978 | """ | ||
979 | log.warning('Event queue overflowed.') | ||
980 | |||
981 | def process_default(self, event): | ||
982 | """ | ||
983 | Default processing event method. By default does nothing. Subclass | ||
984 | ProcessEvent and redefine this method in order to modify its behavior. | ||
985 | |||
986 | @param event: Event to be processed. Can be of any type of events but | ||
987 | IN_Q_OVERFLOW events (see method process_IN_Q_OVERFLOW). | ||
988 | @type event: Event instance | ||
989 | """ | ||
990 | pass | ||
991 | |||
992 | |||
993 | class PrintAllEvents(ProcessEvent): | ||
994 | """ | ||
995 | Dummy class used to print events' string representations. For instance this | ||
996 | class is used from command line to print all received events to stdout. | ||
997 | """ | ||
998 | def my_init(self, out=None): | ||
999 | """ | ||
1000 | @param out: Where events will be written. | ||
1001 | @type out: Object providing a valid file object interface. | ||
1002 | """ | ||
1003 | if out is None: | ||
1004 | out = sys.stdout | ||
1005 | self._out = out | ||
1006 | |||
1007 | def process_default(self, event): | ||
1008 | """ | ||
1009 | Writes event string representation to file object provided to | ||
1010 | my_init(). | ||
1011 | |||
1012 | @param event: Event to be processed. Can be of any type of events but | ||
1013 | IN_Q_OVERFLOW events (see method process_IN_Q_OVERFLOW). | ||
1014 | @type event: Event instance | ||
1015 | """ | ||
1016 | self._out.write(str(event)) | ||
1017 | self._out.write('\n') | ||
1018 | self._out.flush() | ||
1019 | |||
1020 | |||
1021 | class ChainIfTrue(ProcessEvent): | ||
1022 | """ | ||
1023 | Makes conditional chaining depending on the result of the nested | ||
1024 | processing instance. | ||
1025 | """ | ||
1026 | def my_init(self, func): | ||
1027 | """ | ||
1028 | Method automatically called from base class constructor. | ||
1029 | """ | ||
1030 | self._func = func | ||
1031 | |||
1032 | def process_default(self, event): | ||
1033 | return not self._func(event) | ||
1034 | |||
1035 | |||
1036 | class Stats(ProcessEvent): | ||
1037 | """ | ||
1038 | Compute and display trivial statistics about processed events. | ||
1039 | """ | ||
1040 | def my_init(self): | ||
1041 | """ | ||
1042 | Method automatically called from base class constructor. | ||
1043 | """ | ||
1044 | self._start_time = time.time() | ||
1045 | self._stats = {} | ||
1046 | self._stats_lock = threading.Lock() | ||
1047 | |||
1048 | def process_default(self, event): | ||
1049 | """ | ||
1050 | Processes |event|. | ||
1051 | """ | ||
1052 | self._stats_lock.acquire() | ||
1053 | try: | ||
1054 | events = event.maskname.split('|') | ||
1055 | for event_name in events: | ||
1056 | count = self._stats.get(event_name, 0) | ||
1057 | self._stats[event_name] = count + 1 | ||
1058 | finally: | ||
1059 | self._stats_lock.release() | ||
1060 | |||
1061 | def _stats_copy(self): | ||
1062 | self._stats_lock.acquire() | ||
1063 | try: | ||
1064 | return self._stats.copy() | ||
1065 | finally: | ||
1066 | self._stats_lock.release() | ||
1067 | |||
1068 | def __repr__(self): | ||
1069 | stats = self._stats_copy() | ||
1070 | |||
1071 | elapsed = int(time.time() - self._start_time) | ||
1072 | elapsed_str = '' | ||
1073 | if elapsed < 60: | ||
1074 | elapsed_str = str(elapsed) + 'sec' | ||
1075 | elif 60 <= elapsed < 3600: | ||
1076 | elapsed_str = '%dmn%dsec' % (elapsed / 60, elapsed % 60) | ||
1077 | elif 3600 <= elapsed < 86400: | ||
1078 | elapsed_str = '%dh%dmn' % (elapsed / 3600, (elapsed % 3600) / 60) | ||
1079 | elif elapsed >= 86400: | ||
1080 | elapsed_str = '%dd%dh' % (elapsed / 86400, (elapsed % 86400) / 3600) | ||
1081 | stats['ElapsedTime'] = elapsed_str | ||
1082 | |||
1083 | l = [] | ||
1084 | for ev, value in sorted(stats.items(), key=lambda x: x[0]): | ||
1085 | l.append(' %s=%s' % (output_format.field_name(ev), | ||
1086 | output_format.field_value(value))) | ||
1087 | s = '<%s%s >' % (output_format.class_name(self.__class__.__name__), | ||
1088 | ''.join(l)) | ||
1089 | return s | ||
1090 | |||
1091 | def dump(self, filename): | ||
1092 | """ | ||
1093 | Dumps statistics. | ||
1094 | |||
1095 | @param filename: filename where stats will be dumped, filename is | ||
1096 | created and must not exist prior to this call. | ||
1097 | @type filename: string | ||
1098 | """ | ||
1099 | flags = os.O_WRONLY|os.O_CREAT|os.O_NOFOLLOW|os.O_EXCL | ||
1100 | fd = os.open(filename, flags, 0600) | ||
1101 | os.write(fd, str(self)) | ||
1102 | os.close(fd) | ||
1103 | |||
1104 | def __str__(self, scale=45): | ||
1105 | stats = self._stats_copy() | ||
1106 | if not stats: | ||
1107 | return '' | ||
1108 | |||
1109 | m = max(stats.values()) | ||
1110 | unity = float(scale) / m | ||
1111 | fmt = '%%-26s%%-%ds%%s' % (len(output_format.field_value('@' * scale)) | ||
1112 | + 1) | ||
1113 | def func(x): | ||
1114 | return fmt % (output_format.field_name(x[0]), | ||
1115 | output_format.field_value('@' * int(x[1] * unity)), | ||
1116 | output_format.simple('%d' % x[1], 'yellow')) | ||
1117 | s = '\n'.join(map(func, sorted(stats.items(), key=lambda x: x[0]))) | ||
1118 | return s | ||
1119 | |||
1120 | |||
1121 | class NotifierError(PyinotifyError): | ||
1122 | """ | ||
1123 | Notifier Exception. Raised on Notifier error. | ||
1124 | |||
1125 | """ | ||
1126 | def __init__(self, err): | ||
1127 | """ | ||
1128 | @param err: Exception string's description. | ||
1129 | @type err: string | ||
1130 | """ | ||
1131 | PyinotifyError.__init__(self, err) | ||
1132 | |||
1133 | |||
1134 | class Notifier: | ||
1135 | """ | ||
1136 | Read notifications, process events. | ||
1137 | |||
1138 | """ | ||
1139 | def __init__(self, watch_manager, default_proc_fun=None, read_freq=0, | ||
1140 | threshold=0, timeout=None): | ||
1141 | """ | ||
1142 | Initialization. read_freq, threshold and timeout parameters are used | ||
1143 | when looping. | ||
1144 | |||
1145 | @param watch_manager: Watch Manager. | ||
1146 | @type watch_manager: WatchManager instance | ||
1147 | @param default_proc_fun: Default processing method. If None, a new | ||
1148 | instance of PrintAllEvents will be assigned. | ||
1149 | @type default_proc_fun: instance of ProcessEvent | ||
1150 | @param read_freq: if read_freq == 0, events are read asap, | ||
1151 | if read_freq is > 0, this thread sleeps | ||
1152 | max(0, read_freq - timeout) seconds. But if | ||
1153 | timeout is None it may be different because | ||
1154 | poll is blocking waiting for something to read. | ||
1155 | @type read_freq: int | ||
1156 | @param threshold: File descriptor will be read only if the accumulated | ||
1157 | size to read becomes >= threshold. If != 0, you likely | ||
1158 | want to use it in combination with an appropriate | ||
1159 | value for read_freq, because without it you would | ||
1160 | keep looping without really reading anything | ||
1161 | until the amount of events to read is >= threshold. | ||
1162 | At least with read_freq set you might sleep. | ||
1163 | @type threshold: int | ||
1164 | @param timeout: | ||
1165 | https://docs.python.org/3/library/select.html#polling-objects | ||
1166 | @type timeout: int | ||
1167 | """ | ||
1168 | # Watch Manager instance | ||
1169 | self._watch_manager = watch_manager | ||
1170 | # File descriptor | ||
1171 | self._fd = self._watch_manager.get_fd() | ||
1172 | # Poll object and registration | ||
1173 | self._pollobj = select.poll() | ||
1174 | self._pollobj.register(self._fd, select.POLLIN) | ||
1176 | # This pipe is correctly initialized and used by ThreadedNotifier | ||
1176 | self._pipe = (-1, -1) | ||
1177 | # Event queue | ||
1178 | self._eventq = deque() | ||
1179 | # System processing functor, common to all events | ||
1180 | self._sys_proc_fun = _SysProcessEvent(self._watch_manager, self) | ||
1181 | # Default processing method | ||
1182 | self._default_proc_fun = default_proc_fun | ||
1183 | if default_proc_fun is None: | ||
1184 | self._default_proc_fun = PrintAllEvents() | ||
1185 | # Loop parameters | ||
1186 | self._read_freq = read_freq | ||
1187 | self._threshold = threshold | ||
1188 | self._timeout = timeout | ||
1189 | # Coalesce events option | ||
1190 | self._coalesce = False | ||
1191 | # set of str(raw_event), only used when coalesce option is True | ||
1192 | self._eventset = set() | ||
1193 | |||
1194 | def append_event(self, event): | ||
1195 | """ | ||
1196 | Append a raw event to the event queue. | ||
1197 | |||
1198 | @param event: An event. | ||
1199 | @type event: _RawEvent instance. | ||
1200 | """ | ||
1201 | self._eventq.append(event) | ||
1202 | |||
1203 | def proc_fun(self): | ||
1204 | return self._default_proc_fun | ||
1205 | |||
1206 | def coalesce_events(self, coalesce=True): | ||
1207 | """ | ||
1208 | Coalescing events. Events are usually processed in batches, whose size | ||
1209 | depends on various factors. Thus, before processing them, events received | ||
1210 | from inotify are aggregated in a fifo queue. If this coalescing | ||
1211 | option is enabled events are filtered based on their uniqueness: only | ||
1212 | unique events are enqueued, duplicates are discarded. An event is unique | ||
1213 | when the combination of its fields (wd, mask, cookie, name) is unique | ||
1214 | among events of the same batch. After a batch of events is processed any | ||
1215 | event is accepted again. By default this option is disabled; you have | ||
1216 | to explicitly call this function to turn it on. | ||
1217 | |||
1218 | @param coalesce: Optional new coalescing value. True by default. | ||
1219 | @type coalesce: Bool | ||
1220 | """ | ||
1221 | self._coalesce = coalesce | ||
1222 | if not coalesce: | ||
1223 | self._eventset.clear() | ||
1224 | |||
1225 | def check_events(self, timeout=None): | ||
1226 | """ | ||
1227 | Check for new events available to read, blocks up to timeout | ||
1228 | milliseconds. | ||
1229 | |||
1230 | @param timeout: If specified it overrides the corresponding instance | ||
1231 | attribute _timeout. | ||
1232 | @type timeout: int | ||
1233 | |||
1234 | @return: New events to read. | ||
1235 | @rtype: bool | ||
1236 | """ | ||
1237 | while True: | ||
1238 | try: | ||
1239 | # blocks up to 'timeout' milliseconds | ||
1240 | if timeout is None: | ||
1241 | timeout = self._timeout | ||
1242 | ret = self._pollobj.poll(timeout) | ||
1243 | except select.error, err: | ||
1244 | if err[0] == errno.EINTR: | ||
1245 | continue # interrupted, retry | ||
1246 | else: | ||
1247 | raise | ||
1248 | else: | ||
1249 | break | ||
1250 | |||
1251 | if not ret or (self._pipe[0] == ret[0][0]): | ||
1252 | return False | ||
1253 | # only one fd is polled | ||
1254 | return ret[0][1] & select.POLLIN | ||
1255 | |||
1256 | def read_events(self): | ||
1257 | """ | ||
1258 | Read events from device, build _RawEvents, and enqueue them. | ||
1259 | """ | ||
1260 | buf_ = array.array('i', [0]) | ||
1261 | # get event queue size | ||
1262 | if fcntl.ioctl(self._fd, termios.FIONREAD, buf_, 1) == -1: | ||
1263 | return | ||
1264 | queue_size = buf_[0] | ||
1265 | if queue_size < self._threshold: | ||
1266 | log.debug('(fd: %d) %d bytes available to read but threshold is ' | ||
1267 | 'fixed to %d bytes', self._fd, queue_size, | ||
1268 | self._threshold) | ||
1269 | return | ||
1270 | |||
1271 | try: | ||
1272 | # Read content from file | ||
1273 | r = os.read(self._fd, queue_size) | ||
1274 | except Exception, msg: | ||
1275 | raise NotifierError(msg) | ||
1276 | log.debug('Event queue size: %d', queue_size) | ||
1277 | rsum = 0 # counter | ||
1278 | while rsum < queue_size: | ||
1279 | s_size = 16 | ||
1280 | # Retrieve wd, mask, cookie and fname_len | ||
1281 | wd, mask, cookie, fname_len = struct.unpack('iIII', | ||
1282 | r[rsum:rsum+s_size]) | ||
1283 | # Retrieve name | ||
1284 | fname, = struct.unpack('%ds' % fname_len, | ||
1285 | r[rsum + s_size:rsum + s_size + fname_len]) | ||
1286 | rawevent = _RawEvent(wd, mask, cookie, fname) | ||
1287 | if self._coalesce: | ||
1288 | # Only enqueue new (unique) events. | ||
1289 | raweventstr = str(rawevent) | ||
1290 | if raweventstr not in self._eventset: | ||
1291 | self._eventset.add(raweventstr) | ||
1292 | self._eventq.append(rawevent) | ||
1293 | else: | ||
1294 | self._eventq.append(rawevent) | ||
1295 | rsum += s_size + fname_len | ||
1296 | |||
1297 | def process_events(self): | ||
1298 | """ | ||
1299 | Routine for processing events from the queue by calling their | ||
1300 | associated processing method (an instance of ProcessEvent). | ||
1301 | It also does internal processing, to keep the system updated. | ||
1302 | """ | ||
1303 | while self._eventq: | ||
1304 | raw_event = self._eventq.popleft() # pop next event | ||
1305 | if self._watch_manager.ignore_events: | ||
1306 | log.debug("Event ignored: %s" % repr(raw_event)) | ||
1307 | continue | ||
1308 | watch_ = self._watch_manager.get_watch(raw_event.wd) | ||
1309 | if (watch_ is None) and not (raw_event.mask & IN_Q_OVERFLOW): | ||
1310 | if not (raw_event.mask & IN_IGNORED): | ||
1311 | # Not really sure how we ended up here, nor how we should | ||
1312 | # handle these types of events and if it is appropriate to | ||
1313 | # completely skip them (like we are doing here). | ||
1314 | log.warning("Unable to retrieve Watch object associated to %s", | ||
1315 | repr(raw_event)) | ||
1316 | continue | ||
1317 | revent = self._sys_proc_fun(raw_event) # system processings | ||
1318 | if watch_ and watch_.proc_fun: | ||
1319 | watch_.proc_fun(revent) # user processings | ||
1320 | else: | ||
1321 | self._default_proc_fun(revent) | ||
1322 | self._sys_proc_fun.cleanup() # remove old MOVED_* event records | ||
1323 | if self._coalesce: | ||
1324 | self._eventset.clear() | ||
1325 | |||
1326 | def __daemonize(self, pid_file=None, stdin=os.devnull, stdout=os.devnull, | ||
1327 | stderr=os.devnull): | ||
1328 | """ | ||
1329 | @param pid_file: file where the pid will be written. If pid_file=None | ||
1330 | the pid is written to | ||
1331 | /var/run/<sys.argv[0]|pyinotify>.pid, if pid_file=False | ||
1332 | no pid_file is written. | ||
1333 | @param stdin: | ||
1334 | @param stdout: | ||
1335 | @param stderr: files associated to common streams. | ||
1336 | """ | ||
1337 | if pid_file is None: | ||
1338 | dirname = '/var/run/' | ||
1339 | basename = os.path.basename(sys.argv[0]) or 'pyinotify' | ||
1340 | pid_file = os.path.join(dirname, basename + '.pid') | ||
1341 | |||
1342 | if pid_file != False and os.path.lexists(pid_file): | ||
1343 | err = 'Cannot daemonize: pid file %s already exists.' % pid_file | ||
1344 | raise NotifierError(err) | ||
1345 | |||
1346 | def fork_daemon(): | ||
1347 | # Adapted from Chad J. Schroeder's recipe | ||
1348 | # @see http://code.activestate.com/recipes/278731/ | ||
1349 | pid = os.fork() | ||
1350 | if (pid == 0): | ||
1351 | # parent 2 | ||
1352 | os.setsid() | ||
1353 | pid = os.fork() | ||
1354 | if (pid == 0): | ||
1355 | # child | ||
1356 | os.chdir('/') | ||
1357 | os.umask(022) | ||
1358 | else: | ||
1359 | # parent 2 | ||
1360 | os._exit(0) | ||
1361 | else: | ||
1362 | # parent 1 | ||
1363 | os._exit(0) | ||
1364 | |||
1365 | fd_inp = os.open(stdin, os.O_RDONLY) | ||
1366 | os.dup2(fd_inp, 0) | ||
1367 | fd_out = os.open(stdout, os.O_WRONLY|os.O_CREAT, 0600) | ||
1368 | os.dup2(fd_out, 1) | ||
1369 | fd_err = os.open(stderr, os.O_WRONLY|os.O_CREAT, 0600) | ||
1370 | os.dup2(fd_err, 2) | ||
1371 | |||
1372 | # Detach task | ||
1373 | fork_daemon() | ||
1374 | |||
1375 | # Write pid | ||
1376 | if pid_file != False: | ||
1377 | flags = os.O_WRONLY|os.O_CREAT|os.O_NOFOLLOW|os.O_EXCL | ||
1378 | fd_pid = os.open(pid_file, flags, 0600) | ||
1379 | os.write(fd_pid, str(os.getpid()) + '\n') | ||
1380 | os.close(fd_pid) | ||
1381 | # Register unlink function | ||
1382 | atexit.register(lambda : os.unlink(pid_file)) | ||
1383 | |||
1384 | def _sleep(self, ref_time): | ||
1385 | # Only consider sleeping if read_freq is > 0 | ||
1386 | if self._read_freq > 0: | ||
1387 | cur_time = time.time() | ||
1388 | sleep_amount = self._read_freq - (cur_time - ref_time) | ||
1389 | if sleep_amount > 0: | ||
1390 | log.debug('Now sleeping %d seconds', sleep_amount) | ||
1391 | time.sleep(sleep_amount) | ||
1392 | |||
1393 | def loop(self, callback=None, daemonize=False, **args): | ||
1394 | """ | ||
1395 | Events are read at most once every min(read_freq, timeout) | ||
1396 | seconds, and only if the size to read is >= threshold. | ||
1397 | After this method returns it must not be called again for the same | ||
1398 | instance. | ||
1399 | |||
1400 | @param callback: Functor called after each event processing iteration. | ||
1401 | Expects to receive the notifier object (self) as first | ||
1402 | parameter. If this function returns True the loop is | ||
1403 | immediately terminated otherwise the loop method keeps | ||
1404 | looping. | ||
1405 | @type callback: callable object or function | ||
1406 | @param daemonize: This thread is daemonized if set to True. | ||
1407 | @type daemonize: boolean | ||
1408 | @param args: Optional and relevant only if daemonize is True. Remaining | ||
1409 | keyword arguments are passed directly to the | ||
1410 | __daemonize() method. If pid_file=None or is set to a | ||
1411 | pathname the caller must ensure the file does not exist | ||
1412 | before this method is called otherwise an exception | ||
1413 | pyinotify.NotifierError will be raised. If pid_file=False | ||
1414 | it is still daemonized but the pid is not written in any | ||
1415 | file. | ||
1416 | @type args: various | ||
1417 | """ | ||
1418 | if daemonize: | ||
1419 | self.__daemonize(**args) | ||
1420 | |||
1421 | # Read and process events forever | ||
1422 | while 1: | ||
1423 | try: | ||
1424 | self.process_events() | ||
1425 | if (callback is not None) and (callback(self) is True): | ||
1426 | break | ||
1427 | ref_time = time.time() | ||
1428 | # check_events is blocking | ||
1429 | if self.check_events(): | ||
1430 | self._sleep(ref_time) | ||
1431 | self.read_events() | ||
1432 | except KeyboardInterrupt: | ||
1433 | # Stop monitoring if sigint is caught (Control-C). | ||
1434 | log.debug('Pyinotify stops monitoring.') | ||
1435 | break | ||
1436 | # Close internals | ||
1437 | self.stop() | ||
1438 | |||
1439 | def stop(self): | ||
1440 | """ | ||
1441 | Close inotify's instance (close its file descriptor). | ||
1442 | It destroys all existing watches, pending events,... | ||
1443 | This method is automatically called at the end of loop(). | ||
1444 | """ | ||
1445 | self._pollobj.unregister(self._fd) | ||
1446 | os.close(self._fd) | ||
1447 | self._sys_proc_fun = None | ||
1448 | |||
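# --- Editor's usage sketch (not part of the original pyinotify source) ---------------
# A minimal example of driving a plain Notifier: a WatchManager supplies the inotify fd
# and the watches, a ProcessEvent subclass handles events, and loop() blocks until
# Control-C. The '/tmp' path, the mask and the handler/function names below are
# illustrative assumptions only.

class _ExampleHandler(ProcessEvent):
    """Illustrative handler: print created and deleted paths."""
    def process_IN_CREATE(self, event):
        print('created: %s' % event.pathname)

    def process_IN_DELETE(self, event):
        print('deleted: %s' % event.pathname)

def _example_notifier_loop(path='/tmp'):
    """Illustrative only: watch |path| until KeyboardInterrupt."""
    wm = WatchManager()
    notifier = Notifier(wm, default_proc_fun=_ExampleHandler())
    wm.add_watch(path, IN_CREATE | IN_DELETE, rec=False)
    notifier.loop()  # processes events forever, then calls notifier.stop()
# --------------------------------------------------------------------------------------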
1449 | |||
1450 | class ThreadedNotifier(threading.Thread, Notifier): | ||
1451 | """ | ||
1452 | This notifier inherits from threading.Thread in order to run in a separate | ||
1453 | thread, and also inherits from Notifier, because it is a threaded notifier. | ||
1454 | |||
1455 | Note that all functionality provided by this class is also provided | ||
1456 | through the Notifier class. Moreover, Notifier should be considered first | ||
1457 | because it is not threaded and can easily be daemonized. | ||
1458 | """ | ||
1459 | def __init__(self, watch_manager, default_proc_fun=None, read_freq=0, | ||
1460 | threshold=0, timeout=None): | ||
1461 | """ | ||
1462 | Initialization, initialize base classes. read_freq, threshold and | ||
1463 | timeout parameters are used when looping. | ||
1464 | |||
1465 | @param watch_manager: Watch Manager. | ||
1466 | @type watch_manager: WatchManager instance | ||
1467 | @param default_proc_fun: Default processing method. See base class. | ||
1468 | @type default_proc_fun: instance of ProcessEvent | ||
1469 | @param read_freq: if read_freq == 0, events are read asap, | ||
1470 | if read_freq is > 0, this thread sleeps | ||
1471 | max(0, read_freq - timeout) seconds. | ||
1472 | @type read_freq: int | ||
1473 | @param threshold: File descriptor will be read only if the accumulated | ||
1474 | size to read becomes >= threshold. If != 0, you likely | ||
1475 | want to use it in combination with an appropriate | ||
1476 | value set for read_freq, because otherwise you would | ||
1477 | keep looping without really reading anything until the | ||
1478 | amount of events to read is >= threshold. With read_freq | ||
1479 | set, the thread can at least sleep between reads. | ||
1480 | @type threshold: int | ||
1481 | @param timeout: | ||
1482 | https://docs.python.org/3/library/select.html#polling-objects | ||
1483 | @type timeout: int | ||
1484 | """ | ||
1485 | # Init threading base class | ||
1486 | threading.Thread.__init__(self) | ||
1487 | # Stop condition | ||
1488 | self._stop_event = threading.Event() | ||
1489 | # Init Notifier base class | ||
1490 | Notifier.__init__(self, watch_manager, default_proc_fun, read_freq, | ||
1491 | threshold, timeout) | ||
1492 | # Create a new pipe used for thread termination | ||
1493 | self._pipe = os.pipe() | ||
1494 | self._pollobj.register(self._pipe[0], select.POLLIN) | ||
1495 | |||
1496 | def stop(self): | ||
1497 | """ | ||
1498 | Stop notifier's loop. Stop notification. Join the thread. | ||
1499 | """ | ||
1500 | self._stop_event.set() | ||
1501 | os.write(self._pipe[1], 'stop') | ||
1502 | threading.Thread.join(self) | ||
1503 | Notifier.stop(self) | ||
1504 | self._pollobj.unregister(self._pipe[0]) | ||
1505 | os.close(self._pipe[0]) | ||
1506 | os.close(self._pipe[1]) | ||
1507 | |||
1508 | def loop(self): | ||
1509 | """ | ||
1510 | Thread's main loop. Not meant to be called by the user directly. | ||
1511 | Call inherited start() method instead. | ||
1512 | |||
1513 | Events are read at most once every min(read_freq, timeout) | ||
1514 | seconds, and only if the size of events to read is >= threshold. | ||
1515 | """ | ||
1516 | # When the loop must be terminated, .stop() is called: 'stop' | ||
1517 | # is written to the pipe fd so poll() returns and .check_events() | ||
1518 | # returns False, which makes the while loop re-evaluate its stop | ||
1519 | # condition ._stop_event.isSet() and ends the thread's execution. | ||
1520 | while not self._stop_event.isSet(): | ||
1521 | self.process_events() | ||
1522 | ref_time = time.time() | ||
1523 | if self.check_events(): | ||
1524 | self._sleep(ref_time) | ||
1525 | self.read_events() | ||
1526 | |||
1527 | def run(self): | ||
1528 | """ | ||
1529 | Start thread's loop: read and process events until the method | ||
1530 | stop() is called. | ||
1531 | Never call this method directly, instead call the start() method | ||
1532 | inherited from threading.Thread, which then will call run() in | ||
1533 | its turn. | ||
1534 | """ | ||
1535 | self.loop() | ||
1536 | |||
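# --- Editor's usage sketch (not part of the original pyinotify source) ---------------
# ThreadedNotifier runs the read/process loop in its own thread, so the caller can keep
# doing other work and later call stop() to join the thread. The path, the sleep and
# the function name are illustrative assumptions only.

def _example_threaded_notifier(path='/tmp'):
    """Illustrative only: watch |path| in a background thread for a short while."""
    wm = WatchManager()
    notifier = ThreadedNotifier(wm, default_proc_fun=PrintAllEvents())
    notifier.start()                      # spawns the thread, which calls run() -> loop()
    wm.add_watch(path, ALL_EVENTS, rec=False)
    try:
        time.sleep(10)                    # the main thread is free to do real work here
    finally:
        notifier.stop()                   # unblocks poll(), joins the thread, closes fds
# --------------------------------------------------------------------------------------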
1537 | |||
1538 | class AsyncNotifier(asyncore.file_dispatcher, Notifier): | ||
1539 | """ | ||
1540 | This notifier inherits from asyncore.file_dispatcher in order to be able to | ||
1541 | use pyinotify along with the asyncore framework. | ||
1542 | |||
1543 | """ | ||
1544 | def __init__(self, watch_manager, default_proc_fun=None, read_freq=0, | ||
1545 | threshold=0, timeout=None, channel_map=None): | ||
1546 | """ | ||
1547 | Initializes the async notifier. The only additional parameter is | ||
1548 | 'channel_map' which is the optional asyncore private map. See | ||
1549 | Notifier class for the meaning of the other parameters. | ||
1550 | |||
1551 | """ | ||
1552 | Notifier.__init__(self, watch_manager, default_proc_fun, read_freq, | ||
1553 | threshold, timeout) | ||
1554 | asyncore.file_dispatcher.__init__(self, self._fd, channel_map) | ||
1555 | |||
1556 | def handle_read(self): | ||
1557 | """ | ||
1558 | When asyncore tells us we can read from the fd, we proceed with | ||
1559 | processing events. This method can be overridden to handle a notification | ||
1560 | differently. | ||
1561 | |||
1562 | """ | ||
1563 | self.read_events() | ||
1564 | self.process_events() | ||
1565 | |||
1566 | |||
1567 | class TornadoAsyncNotifier(Notifier): | ||
1568 | """ | ||
1569 | Tornado ioloop adapter. | ||
1570 | |||
1571 | """ | ||
1572 | def __init__(self, watch_manager, ioloop, callback=None, | ||
1573 | default_proc_fun=None, read_freq=0, threshold=0, timeout=None, | ||
1574 | channel_map=None): | ||
1575 | """ | ||
1576 | Note that if you later call ioloop.close(), be sure to leave the | ||
1577 | all_fds parameter at its default value of False. | ||
1578 | |||
1579 | See example tornado_notifier.py for an example using this notifier. | ||
1580 | |||
1581 | @param ioloop: Tornado's IO loop. | ||
1582 | @type ioloop: tornado.ioloop.IOLoop instance. | ||
1583 | @param callback: Functor called at the end of each call to handle_read | ||
1584 | (IOLoop's read handler). Expects to receive the | ||
1585 | notifier object (self) as single parameter. | ||
1586 | @type callback: callable object or function | ||
1587 | """ | ||
1588 | self.io_loop = ioloop | ||
1589 | self.handle_read_callback = callback | ||
1590 | Notifier.__init__(self, watch_manager, default_proc_fun, read_freq, | ||
1591 | threshold, timeout) | ||
1592 | ioloop.add_handler(self._fd, self.handle_read, ioloop.READ) | ||
1593 | |||
1594 | def stop(self): | ||
1595 | self.io_loop.remove_handler(self._fd) | ||
1596 | Notifier.stop(self) | ||
1597 | |||
1598 | def handle_read(self, *args, **kwargs): | ||
1599 | """ | ||
1600 | See comment in AsyncNotifier. | ||
1601 | |||
1602 | """ | ||
1603 | self.read_events() | ||
1604 | self.process_events() | ||
1605 | if self.handle_read_callback is not None: | ||
1606 | self.handle_read_callback(self) | ||
1607 | |||
1608 | |||
1609 | class AsyncioNotifier(Notifier): | ||
1610 | """ | ||
1611 | |||
1612 | asyncio/trollius event loop adapter. | ||
1613 | |||
1614 | """ | ||
1615 | def __init__(self, watch_manager, loop, callback=None, | ||
1616 | default_proc_fun=None, read_freq=0, threshold=0, timeout=None): | ||
1617 | """ | ||
1618 | |||
1619 | See examples/asyncio_notifier.py for an example usage. | ||
1620 | |||
1621 | @param loop: asyncio or trollius event loop instance. | ||
1622 | @type loop: asyncio.BaseEventLoop or trollius.BaseEventLoop instance. | ||
1623 | @param callback: Functor called at the end of each call to handle_read. | ||
1624 | Expects to receive the notifier object (self) as | ||
1625 | single parameter. | ||
1626 | @type callback: callable object or function | ||
1627 | |||
1628 | """ | ||
1629 | self.loop = loop | ||
1630 | self.handle_read_callback = callback | ||
1631 | Notifier.__init__(self, watch_manager, default_proc_fun, read_freq, | ||
1632 | threshold, timeout) | ||
1633 | loop.add_reader(self._fd, self.handle_read) | ||
1634 | |||
1635 | def stop(self): | ||
1636 | self.loop.remove_reader(self._fd) | ||
1637 | Notifier.stop(self) | ||
1638 | |||
1639 | def handle_read(self, *args, **kwargs): | ||
1640 | self.read_events() | ||
1641 | self.process_events() | ||
1642 | if self.handle_read_callback is not None: | ||
1643 | self.handle_read_callback(self) | ||
1644 | |||
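# --- Editor's usage sketch (not part of the original pyinotify source) ---------------
# AsyncioNotifier plugs the inotify fd into an asyncio (or trollius) event loop via
# loop.add_reader(). This sketch assumes a Python 3 interpreter with asyncio available;
# the path and the function name are illustrative assumptions only.

def _example_asyncio_notifier(path='/tmp'):
    """Illustrative only: watch |path| from an asyncio event loop."""
    import asyncio
    loop = asyncio.get_event_loop()
    wm = WatchManager()
    notifier = AsyncioNotifier(wm, loop, default_proc_fun=PrintAllEvents())
    wm.add_watch(path, ALL_EVENTS)
    try:
        loop.run_forever()        # handle_read() runs whenever the inotify fd is readable
    finally:
        notifier.stop()           # removes the reader and closes the inotify fd
        loop.close()
# --------------------------------------------------------------------------------------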
1645 | |||
1646 | class Watch: | ||
1647 | """ | ||
1648 | Represent a watch, i.e. a file or directory being watched. | ||
1649 | |||
1650 | """ | ||
1651 | __slots__ = ('wd', 'path', 'mask', 'proc_fun', 'auto_add', | ||
1652 | 'exclude_filter', 'dir') | ||
1653 | |||
1654 | def __init__(self, wd, path, mask, proc_fun, auto_add, exclude_filter): | ||
1655 | """ | ||
1656 | Initializations. | ||
1657 | |||
1658 | @param wd: Watch descriptor. | ||
1659 | @type wd: int | ||
1660 | @param path: Path of the file or directory being watched. | ||
1661 | @type path: str | ||
1662 | @param mask: Mask. | ||
1663 | @type mask: int | ||
1664 | @param proc_fun: Processing callable object. | ||
1665 | @type proc_fun: | ||
1666 | @param auto_add: Automatically add watches on new directories. | ||
1667 | @type auto_add: bool | ||
1668 | @param exclude_filter: Boolean function, used to exclude new | ||
1669 | directories from being automatically watched. | ||
1670 | See WatchManager.__init__ | ||
1671 | @type exclude_filter: callable object | ||
1672 | """ | ||
1673 | self.wd = wd | ||
1674 | self.path = path | ||
1675 | self.mask = mask | ||
1676 | self.proc_fun = proc_fun | ||
1677 | self.auto_add = auto_add | ||
1678 | self.exclude_filter = exclude_filter | ||
1679 | self.dir = os.path.isdir(self.path) | ||
1680 | |||
1681 | def __repr__(self): | ||
1682 | """ | ||
1683 | @return: String representation. | ||
1684 | @rtype: str | ||
1685 | """ | ||
1686 | s = ' '.join(['%s%s%s' % (output_format.field_name(attr), | ||
1687 | output_format.punctuation('='), | ||
1688 | output_format.field_value(getattr(self, | ||
1689 | attr))) \ | ||
1690 | for attr in self.__slots__ if not attr.startswith('_')]) | ||
1691 | |||
1692 | s = '%s%s %s %s' % (output_format.punctuation('<'), | ||
1693 | output_format.class_name(self.__class__.__name__), | ||
1694 | s, | ||
1695 | output_format.punctuation('>')) | ||
1696 | return s | ||
1697 | |||
1698 | |||
1699 | class ExcludeFilter: | ||
1700 | """ | ||
1701 | ExcludeFilter is an exclusion filter. | ||
1702 | |||
1703 | """ | ||
1704 | def __init__(self, arg_lst): | ||
1705 | """ | ||
1706 | Examples: | ||
1707 | ef1 = ExcludeFilter(["/etc/rc.*", "/etc/hostname"]) | ||
1708 | ef2 = ExcludeFilter("/my/path/exclude.lst") | ||
1709 | Where exclude.lst contains: | ||
1710 | /etc/rc.* | ||
1711 | /etc/hostname | ||
1712 | |||
1713 | Note: it is not possible to exclude a file if its encapsulating | ||
1714 | directory is itself watched. See this issue for more details | ||
1715 | https://github.com/seb-m/pyinotify/issues/31 | ||
1716 | |||
1717 | @param arg_lst: is either a list of patterns or a filename from which | ||
1718 | patterns will be loaded. | ||
1719 | @type arg_lst: list of str or str | ||
1720 | """ | ||
1721 | if isinstance(arg_lst, str): | ||
1722 | lst = self._load_patterns_from_file(arg_lst) | ||
1723 | elif isinstance(arg_lst, list): | ||
1724 | lst = arg_lst | ||
1725 | else: | ||
1726 | raise TypeError | ||
1727 | |||
1728 | self._lregex = [] | ||
1729 | for regex in lst: | ||
1730 | self._lregex.append(re.compile(regex, re.UNICODE)) | ||
1731 | |||
1732 | def _load_patterns_from_file(self, filename): | ||
1733 | lst = [] | ||
1734 | file_obj = file(filename, 'r') | ||
1735 | try: | ||
1736 | for line in file_obj.readlines(): | ||
1737 | # Trim leading and trailing whitespace | ||
1738 | pattern = line.strip() | ||
1739 | if not pattern or pattern.startswith('#'): | ||
1740 | continue | ||
1741 | lst.append(pattern) | ||
1742 | finally: | ||
1743 | file_obj.close() | ||
1744 | return lst | ||
1745 | |||
1746 | def _match(self, regex, path): | ||
1747 | return regex.match(path) is not None | ||
1748 | |||
1749 | def __call__(self, path): | ||
1750 | """ | ||
1751 | @param path: Path to match against provided regexps. | ||
1752 | @type path: str | ||
1753 | @return: Return True if path has been matched and should | ||
1754 | be excluded, False otherwise. | ||
1755 | @rtype: bool | ||
1756 | """ | ||
1757 | for regex in self._lregex: | ||
1758 | if self._match(regex, path): | ||
1759 | return True | ||
1760 | return False | ||
1761 | |||
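# --- Editor's usage sketch (not part of the original pyinotify source) ---------------
# An ExcludeFilter can be passed either to WatchManager's constructor or to an
# individual add_watch() call; matching paths are skipped when watches are added
# recursively. The patterns, the path and the function name are illustrative
# assumptions only.

def _example_exclude_filter():
    """Illustrative only: recursively watch /etc while excluding a few entries."""
    excl = ExcludeFilter(['/etc/rc.*', '/etc/hostname'])
    wm = WatchManager()
    notifier = Notifier(wm, default_proc_fun=PrintAllEvents())
    # Excluded paths show up in the returned dict with the value -2.
    wdd = wm.add_watch('/etc', IN_CREATE | IN_DELETE, rec=True, auto_add=True,
                       exclude_filter=excl)
    log.debug('add_watch returned: %s' % wdd)
    notifier.loop()
# --------------------------------------------------------------------------------------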
1762 | |||
1763 | class WatchManagerError(Exception): | ||
1764 | """ | ||
1765 | WatchManager Exception. Raised on errors encountered during watch | ||
1766 | operations. | ||
1767 | |||
1768 | """ | ||
1769 | def __init__(self, msg, wmd): | ||
1770 | """ | ||
1771 | @param msg: Exception string's description. | ||
1772 | @type msg: string | ||
1773 | @param wmd: This dictionary contains the wd assigned to paths of the | ||
1774 | same call for which watches were successfully added. | ||
1775 | @type wmd: dict | ||
1776 | """ | ||
1777 | self.wmd = wmd | ||
1778 | Exception.__init__(self, msg) | ||
1779 | |||
1780 | |||
1781 | class WatchManager: | ||
1782 | """ | ||
1783 | Provide operations for watching files and directories. Its internal | ||
1784 | dictionary is used to reference watched items. When used inside | ||
1785 | threaded code, one must instantiate as many WatchManager instances as | ||
1786 | there are ThreadedNotifier instances. | ||
1787 | |||
1788 | """ | ||
1789 | def __init__(self, exclude_filter=lambda path: False): | ||
1790 | """ | ||
1791 | Initialization: init inotify, init watch manager dictionary. | ||
1792 | Raise OSError if initialization fails, raise InotifyBindingNotFoundError | ||
1793 | if no inotify binding was found (through ctypes or from direct access to | ||
1794 | syscalls). | ||
1795 | |||
1796 | @param exclude_filter: boolean function, returns True if current | ||
1797 | path must be excluded from being watched. | ||
1798 | Convenient for providing a common exclusion | ||
1799 | filter for every call to add_watch. | ||
1800 | @type exclude_filter: callable object | ||
1801 | """ | ||
1802 | self._ignore_events = False | ||
1803 | self._exclude_filter = exclude_filter | ||
1804 | self._wmd = {} # watch dict key: watch descriptor, value: watch | ||
1805 | |||
1806 | self._inotify_wrapper = INotifyWrapper.create() | ||
1807 | if self._inotify_wrapper is None: | ||
1808 | raise InotifyBindingNotFoundError() | ||
1809 | |||
1810 | self._fd = self._inotify_wrapper.inotify_init() # file descriptor | ||
1811 | if self._fd < 0: | ||
1812 | err = 'Cannot initialize new instance of inotify, %s' | ||
1813 | raise OSError(err % self._inotify_wrapper.str_errno()) | ||
1814 | |||
1815 | def close(self): | ||
1816 | """ | ||
1817 | Close inotify's file descriptor; this action will also automatically | ||
1818 | remove (i.e. stop watching) all its associated watch descriptors. | ||
1819 | After a call to this method the WatchManager instance becomes useless | ||
1820 | and cannot be reused; a new instance must then be instantiated. It | ||
1821 | makes sense to call this method in a few situations, for instance if | ||
1822 | several independent WatchManagers must be instantiated or if all watches | ||
1823 | must be removed and no other watches need to be added. | ||
1824 | """ | ||
1825 | os.close(self._fd) | ||
1826 | |||
1827 | def get_fd(self): | ||
1828 | """ | ||
1829 | Return assigned inotify's file descriptor. | ||
1830 | |||
1831 | @return: File descriptor. | ||
1832 | @rtype: int | ||
1833 | """ | ||
1834 | return self._fd | ||
1835 | |||
1836 | def get_watch(self, wd): | ||
1837 | """ | ||
1838 | Get watch from provided watch descriptor wd. | ||
1839 | |||
1840 | @param wd: Watch descriptor. | ||
1841 | @type wd: int | ||
1842 | """ | ||
1843 | return self._wmd.get(wd) | ||
1844 | |||
1845 | def del_watch(self, wd): | ||
1846 | """ | ||
1847 | Remove watch entry associated to watch descriptor wd. | ||
1848 | |||
1849 | @param wd: Watch descriptor. | ||
1850 | @type wd: int | ||
1851 | """ | ||
1852 | try: | ||
1853 | del self._wmd[wd] | ||
1854 | except KeyError, err: | ||
1855 | log.error('Cannot delete unknown watch descriptor %s' % str(err)) | ||
1856 | |||
1857 | @property | ||
1858 | def watches(self): | ||
1859 | """ | ||
1860 | Get a reference to the internal watch manager dictionary. | ||
1861 | |||
1862 | @return: Internal watch manager dictionary. | ||
1863 | @rtype: dict | ||
1864 | """ | ||
1865 | return self._wmd | ||
1866 | |||
1867 | def __format_path(self, path): | ||
1868 | """ | ||
1869 | Format path to its internal (stored in watch manager) representation. | ||
1870 | """ | ||
1871 | # Unicode strings are converted back to strings, because it seems | ||
1872 | # that inotify_add_watch from ctypes does not work well when | ||
1873 | # it receives a ctypes.create_unicode_buffer instance as argument. | ||
1874 | # Therefore wds are indexed with byte strings and not with | ||
1875 | # unicode paths. | ||
1876 | if isinstance(path, unicode): | ||
1877 | path = path.encode(sys.getfilesystemencoding()) | ||
1878 | return os.path.normpath(path) | ||
1879 | |||
1880 | def __add_watch(self, path, mask, proc_fun, auto_add, exclude_filter): | ||
1881 | """ | ||
1882 | Add a watch on path, build a Watch object and insert it in the | ||
1883 | watch manager dictionary. Return the wd value. | ||
1884 | """ | ||
1885 | path = self.__format_path(path) | ||
1886 | if auto_add and not mask & IN_CREATE: | ||
1887 | mask |= IN_CREATE | ||
1888 | wd = self._inotify_wrapper.inotify_add_watch(self._fd, path, mask) | ||
1889 | if wd < 0: | ||
1890 | return wd | ||
1891 | watch = Watch(wd=wd, path=path, mask=mask, proc_fun=proc_fun, | ||
1892 | auto_add=auto_add, exclude_filter=exclude_filter) | ||
1893 | self._wmd[wd] = watch | ||
1894 | log.debug('New %s', watch) | ||
1895 | return wd | ||
1896 | |||
1897 | def __glob(self, path, do_glob): | ||
1898 | if do_glob: | ||
1899 | return glob(path) | ||
1900 | else: | ||
1901 | return [path] | ||
1902 | |||
1903 | def add_watch(self, path, mask, proc_fun=None, rec=False, | ||
1904 | auto_add=False, do_glob=False, quiet=True, | ||
1905 | exclude_filter=None): | ||
1906 | """ | ||
1907 | Add watch(es) on the provided |path|(s) with associated |mask| flag | ||
1908 | value and optionally with a processing |proc_fun| function and | ||
1909 | recursive flag |rec| set to True. | ||
1910 | Ideally |path| components should not be unicode objects. Note that | ||
1911 | although unicode paths are accepted they are converted to byte | ||
1912 | strings before a watch is put on that path. The encoding used for | ||
1913 | converting the unicode object is given by sys.getfilesystemencoding(). | ||
1914 | If |path| is already watched it is ignored, but if this method is | ||
1915 | called with rec=True a watch is put on each of its not-yet-watched | ||
1916 | subdirectories. | ||
1917 | |||
1918 | @param path: Path to watch, the path can either be a file or a | ||
1919 | directory. Also accepts a sequence (list) of paths. | ||
1920 | @type path: string or list of strings | ||
1921 | @param mask: Bitmask of events. | ||
1922 | @type mask: int | ||
1923 | @param proc_fun: Processing object. | ||
1924 | @type proc_fun: function or ProcessEvent instance or instance of | ||
1925 | one of its subclasses or callable object. | ||
1926 | @param rec: Recursively add watches from path on all its | ||
1927 | subdirectories, set to False by default (doesn't | ||
1928 | follow symlinks in any case). | ||
1929 | @type rec: bool | ||
1930 | @param auto_add: Automatically add watches on newly created | ||
1931 | directories in watched parent |path| directory. | ||
1932 | If |auto_add| is True, IN_CREATE is ored with |mask| | ||
1933 | when the watch is added. | ||
1934 | @type auto_add: bool | ||
1935 | @param do_glob: Do globbing on pathname (see standard globbing | ||
1936 | module for more information). | ||
1937 | @type do_glob: bool | ||
1938 | @param quiet: if False raises a WatchManagerError exception on | ||
1939 | error. See example not_quiet.py. | ||
1940 | @type quiet: bool | ||
1941 | @param exclude_filter: predicate (boolean function), which returns | ||
1942 | True if the current path must be excluded | ||
1943 | from being watched. This argument has | ||
1944 | precedence over exclude_filter passed to | ||
1945 | the class' constructor. | ||
1946 | @type exclude_filter: callable object | ||
1947 | @return: dict of paths associated to watch descriptors. A wd value | ||
1948 | is positive if the watch was added successfully, | ||
1949 | otherwise the value is negative. If the path was invalid | ||
1950 | or was already watched it is not included in the returned | ||
1951 | dictionary. | ||
1952 | @rtype: dict of {str: int} | ||
1953 | """ | ||
1954 | ret_ = {} # return {path: wd, ...} | ||
1955 | |||
1956 | if exclude_filter is None: | ||
1957 | exclude_filter = self._exclude_filter | ||
1958 | |||
1959 | # normalize args as list elements | ||
1960 | for npath in self.__format_param(path): | ||
1961 | # unix pathname pattern expansion | ||
1962 | for apath in self.__glob(npath, do_glob): | ||
1963 | # recursively list subdirs according to rec param | ||
1964 | for rpath in self.__walk_rec(apath, rec): | ||
1965 | if not exclude_filter(rpath): | ||
1966 | wd = ret_[rpath] = self.__add_watch(rpath, mask, | ||
1967 | proc_fun, | ||
1968 | auto_add, | ||
1969 | exclude_filter) | ||
1970 | if wd < 0: | ||
1971 | err = ('add_watch: cannot watch %s WD=%d, %s' % \ | ||
1972 | (rpath, wd, | ||
1973 | self._inotify_wrapper.str_errno())) | ||
1974 | if quiet: | ||
1975 | log.error(err) | ||
1976 | else: | ||
1977 | raise WatchManagerError(err, ret_) | ||
1978 | else: | ||
1979 | # Let's say -2 means 'explicitly excluded | ||
1980 | # from watching'. | ||
1981 | ret_[rpath] = -2 | ||
1982 | return ret_ | ||
1983 | |||
1984 | def __get_sub_rec(self, lpath): | ||
1985 | """ | ||
1986 | Get every wd from self._wmd if its path is under the path of | ||
1987 | one (at least) of those in lpath. Doesn't follow symlinks. | ||
1988 | |||
1989 | @param lpath: list of watch descriptors | ||
1990 | @type lpath: list of int | ||
1991 | @return: list of watch descriptors | ||
1992 | @rtype: list of int | ||
1993 | """ | ||
1994 | for d in lpath: | ||
1995 | root = self.get_path(d) | ||
1996 | if root is not None: | ||
1997 | # always keep root | ||
1998 | yield d | ||
1999 | else: | ||
2000 | # if invalid | ||
2001 | continue | ||
2002 | |||
2003 | # nothing else to expect | ||
2004 | if not os.path.isdir(root): | ||
2005 | continue | ||
2006 | |||
2007 | # normalization | ||
2008 | root = os.path.normpath(root) | ||
2009 | # recursion | ||
2010 | lend = len(root) | ||
2011 | for iwd in self._wmd.items(): | ||
2012 | cur = iwd[1].path | ||
2013 | pref = os.path.commonprefix([root, cur]) | ||
2014 | if root == os.sep or (len(pref) == lend and \ | ||
2015 | len(cur) > lend and \ | ||
2016 | cur[lend] == os.sep): | ||
2017 | yield iwd[1].wd | ||
2018 | |||
2019 | def update_watch(self, wd, mask=None, proc_fun=None, rec=False, | ||
2020 | auto_add=False, quiet=True): | ||
2021 | """ | ||
2022 | Update existing watch descriptors |wd|. The |mask| value, the | ||
2023 | processing object |proc_fun|, the recursive param |rec| and the | ||
2024 | |auto_add| and |quiet| flags can all be updated. | ||
2025 | |||
2026 | @param wd: Watch Descriptor to update. Also accepts a list of | ||
2027 | watch descriptors. | ||
2028 | @type wd: int or list of int | ||
2029 | @param mask: Optional new bitmask of events. | ||
2030 | @type mask: int | ||
2031 | @param proc_fun: Optional new processing function. | ||
2032 | @type proc_fun: function or ProcessEvent instance or instance of | ||
2033 | one of its subclasses or callable object. | ||
2034 | @param rec: Optionally adds watches recursively on all | ||
2035 | subdirectories contained into |wd| directory. | ||
2036 | @type rec: bool | ||
2037 | @param auto_add: Automatically adds watches on newly created | ||
2038 | directories in the watch's path corresponding to |wd|. | ||
2039 | If |auto_add| is True, IN_CREATE is ored with |mask| | ||
2040 | when the watch is updated. | ||
2041 | @type auto_add: bool | ||
2042 | @param quiet: If False raises a WatchManagerError exception on | ||
2043 | error. See example not_quiet.py | ||
2044 | @type quiet: bool | ||
2045 | @return: dict of watch descriptors associated with boolean values. | ||
2046 | True if the corresponding wd has been successfully | ||
2047 | updated, False otherwise. | ||
2048 | @rtype: dict of {int: bool} | ||
2049 | """ | ||
2050 | lwd = self.__format_param(wd) | ||
2051 | if rec: | ||
2052 | lwd = self.__get_sub_rec(lwd) | ||
2053 | |||
2054 | ret_ = {} # return {wd: bool, ...} | ||
2055 | for awd in lwd: | ||
2056 | apath = self.get_path(awd) | ||
2057 | if not apath or awd < 0: | ||
2058 | err = 'update_watch: invalid WD=%d' % awd | ||
2059 | if quiet: | ||
2060 | log.error(err) | ||
2061 | continue | ||
2062 | raise WatchManagerError(err, ret_) | ||
2063 | |||
2064 | if mask: | ||
2065 | wd_ = self._inotify_wrapper.inotify_add_watch(self._fd, apath, | ||
2066 | mask) | ||
2067 | if wd_ < 0: | ||
2068 | ret_[awd] = False | ||
2069 | err = ('update_watch: cannot update %s WD=%d, %s' % \ | ||
2070 | (apath, wd_, self._inotify_wrapper.str_errno())) | ||
2071 | if quiet: | ||
2072 | log.error(err) | ||
2073 | continue | ||
2074 | raise WatchManagerError(err, ret_) | ||
2075 | |||
2076 | assert(awd == wd_) | ||
2077 | |||
2078 | if proc_fun or auto_add: | ||
2079 | watch_ = self._wmd[awd] | ||
2080 | |||
2081 | if proc_fun: | ||
2082 | watch_.proc_fun = proc_fun | ||
2083 | |||
2084 | if auto_add: | ||
2085 | watch_.auto_add = auto_add | ||
2086 | |||
2087 | ret_[awd] = True | ||
2088 | log.debug('Updated watch - %s', self._wmd[awd]) | ||
2089 | return ret_ | ||
2090 | |||
2091 | def __format_param(self, param): | ||
2092 | """ | ||
2093 | @param param: Parameter. | ||
2094 | @type param: string or int | ||
2095 | @return: param's elements if param is a list, otherwise param itself. | ||
2096 | @rtype: list of type(param) | ||
2097 | """ | ||
2098 | if isinstance(param, list): | ||
2099 | for p_ in param: | ||
2100 | yield p_ | ||
2101 | else: | ||
2102 | yield param | ||
2103 | |||
2104 | def get_wd(self, path): | ||
2105 | """ | ||
2106 | Returns the watch descriptor associated with path. This method | ||
2107 | has a prohibitive cost; always prefer to keep the WD | ||
2108 | returned by add_watch(). If the path is unknown it returns None. | ||
2109 | |||
2110 | @param path: Path. | ||
2111 | @type path: str | ||
2112 | @return: WD or None. | ||
2113 | @rtype: int or None | ||
2114 | """ | ||
2115 | path = self.__format_path(path) | ||
2116 | for iwd in self._wmd.items(): | ||
2117 | if iwd[1].path == path: | ||
2118 | return iwd[0] | ||
2119 | |||
2120 | def get_path(self, wd): | ||
2121 | """ | ||
2122 | Returns the path associated with WD; if WD is unknown it returns None. | ||
2123 | |||
2124 | @param wd: Watch descriptor. | ||
2125 | @type wd: int | ||
2126 | @return: Path or None. | ||
2127 | @rtype: string or None | ||
2128 | """ | ||
2129 | watch_ = self._wmd.get(wd) | ||
2130 | if watch_ is not None: | ||
2131 | return watch_.path | ||
2132 | |||
2133 | def __walk_rec(self, top, rec): | ||
2134 | """ | ||
2135 | Yields each subdirectory of top; doesn't follow symlinks. | ||
2136 | If rec is False, only yields top. | ||
2137 | |||
2138 | @param top: root directory. | ||
2139 | @type top: string | ||
2140 | @param rec: recursive flag. | ||
2141 | @type rec: bool | ||
2142 | @return: path of one subdirectory. | ||
2143 | @rtype: string | ||
2144 | """ | ||
2145 | if not rec or os.path.islink(top) or not os.path.isdir(top): | ||
2146 | yield top | ||
2147 | else: | ||
2148 | for root, dirs, files in os.walk(top): | ||
2149 | yield root | ||
2150 | |||
2151 | def rm_watch(self, wd, rec=False, quiet=True): | ||
2152 | """ | ||
2153 | Removes watch(es). | ||
2154 | |||
2155 | @param wd: Watch Descriptor of the file or directory to unwatch. | ||
2156 | Also accepts a list of WDs. | ||
2157 | @type wd: int or list of int. | ||
2158 | @param rec: Recursively removes watches on every already watched | ||
2159 | subdirectories and subfiles. | ||
2160 | @type rec: bool | ||
2161 | @param quiet: If False raises a WatchManagerError exception on | ||
2162 | error. See example not_quiet.py | ||
2163 | @type quiet: bool | ||
2164 | @return: dict of watch descriptors associated with boolean values. | ||
2165 | True if the corresponding wd has been successfully | ||
2166 | removed, False otherwise. | ||
2167 | @rtype: dict of {int: bool} | ||
2168 | """ | ||
2169 | lwd = self.__format_param(wd) | ||
2170 | if rec: | ||
2171 | lwd = self.__get_sub_rec(lwd) | ||
2172 | |||
2173 | ret_ = {} # return {wd: bool, ...} | ||
2174 | for awd in lwd: | ||
2175 | # remove watch | ||
2176 | wd_ = self._inotify_wrapper.inotify_rm_watch(self._fd, awd) | ||
2177 | if wd_ < 0: | ||
2178 | ret_[awd] = False | ||
2179 | err = ('rm_watch: cannot remove WD=%d, %s' % \ | ||
2180 | (awd, self._inotify_wrapper.str_errno())) | ||
2181 | if quiet: | ||
2182 | log.error(err) | ||
2183 | continue | ||
2184 | raise WatchManagerError(err, ret_) | ||
2185 | |||
2186 | # Remove watch from our dictionary | ||
2187 | if awd in self._wmd: | ||
2188 | del self._wmd[awd] | ||
2189 | ret_[awd] = True | ||
2190 | log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd)) | ||
2191 | return ret_ | ||
2192 | |||
2193 | |||
2194 | def watch_transient_file(self, filename, mask, proc_class): | ||
2195 | """ | ||
2196 | Watch a transient file, which will be created and deleted frequently | ||
2197 | over time (e.g. pid file). | ||
2198 | |||
2199 | @attention: Currently, when this function is used, it is not | ||
2200 | possible to correctly watch events triggered in the same | ||
2201 | base directory as the one containing the watched | ||
2202 | transient file. For instance it would be wrong to make these | ||
2203 | two successive calls: wm.watch_transient_file('/var/run/foo.pid', ...) | ||
2204 | and wm.add_watch('/var/run/', ...) | ||
2205 | |||
2206 | @param filename: Filename. | ||
2207 | @type filename: string | ||
2208 | @param mask: Bitmask of events, should contain IN_CREATE and IN_DELETE. | ||
2209 | @type mask: int | ||
2210 | @param proc_class: ProcessEvent (or one of its subclasses); it must | ||
2211 | accept a ProcessEvent instance as argument to | ||
2212 | __init__, see the transient_file.py example for more | ||
2213 | details. | ||
2214 | @type proc_class: ProcessEvent class or one of its subclasses. | ||
2215 | @return: Same as add_watch(). | ||
2216 | @rtype: Same as add_watch(). | ||
2217 | """ | ||
2218 | dirname = os.path.dirname(filename) | ||
2219 | if dirname == '': | ||
2220 | return {} # Maintains coherence with add_watch() | ||
2221 | basename = os.path.basename(filename) | ||
2222 | # Assuming we are watching at least for IN_CREATE and IN_DELETE | ||
2223 | mask |= IN_CREATE | IN_DELETE | ||
2224 | |||
2225 | def cmp_name(event): | ||
2226 | if getattr(event, 'name') is None: | ||
2227 | return False | ||
2228 | return basename == event.name | ||
2229 | return self.add_watch(dirname, mask, | ||
2230 | proc_fun=proc_class(ChainIfTrue(func=cmp_name)), | ||
2231 | rec=False, | ||
2232 | auto_add=False, do_glob=False, | ||
2233 | exclude_filter=lambda path: False) | ||
2234 | |||
2235 | def get_ignore_events(self): | ||
2236 | return self._ignore_events | ||
2237 | |||
2238 | def set_ignore_events(self, nval): | ||
2239 | self._ignore_events = nval | ||
2240 | |||
2241 | ignore_events = property(get_ignore_events, set_ignore_events, | ||
2242 | "Make watch manager ignoring new events.") | ||
2243 | |||
2244 | |||
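# --- Editor's usage sketch (not part of the original pyinotify source) ---------------
# The WatchManager methods above return plain dicts, so callers can check which watches
# were added, updated or removed successfully. The path, the masks and the function
# name are illustrative assumptions only.

def _example_watch_manager():
    """Illustrative only: add, update and remove watches on a directory tree."""
    wm = WatchManager()
    wdd = wm.add_watch('/tmp', IN_CREATE, rec=True, auto_add=True)
    failed = [p for (p, wd) in wdd.items() if wd < 0]
    if failed:
        log.error('could not watch: %s' % failed)
    # Widen the mask on every watch under /tmp, then drop them all again.
    wm.update_watch(wdd['/tmp'], mask=IN_CREATE | IN_DELETE, rec=True)
    wm.rm_watch(wdd['/tmp'], rec=True)
    wm.close()
# --------------------------------------------------------------------------------------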
2245 | |||
2246 | class RawOutputFormat: | ||
2247 | """ | ||
2248 | Format string representations. | ||
2249 | """ | ||
2250 | def __init__(self, format=None): | ||
2251 | self.format = format or {} | ||
2252 | |||
2253 | def simple(self, s, attribute): | ||
2254 | if not isinstance(s, str): | ||
2255 | s = str(s) | ||
2256 | return (self.format.get(attribute, '') + s + | ||
2257 | self.format.get('normal', '')) | ||
2258 | |||
2259 | def punctuation(self, s): | ||
2260 | """Punctuation color.""" | ||
2261 | return self.simple(s, 'normal') | ||
2262 | |||
2263 | def field_value(self, s): | ||
2264 | """Field value color.""" | ||
2265 | return self.simple(s, 'purple') | ||
2266 | |||
2267 | def field_name(self, s): | ||
2268 | """Field name color.""" | ||
2269 | return self.simple(s, 'blue') | ||
2270 | |||
2271 | def class_name(self, s): | ||
2272 | """Class name color.""" | ||
2273 | return self.format.get('red', '') + self.simple(s, 'bold') | ||
2274 | |||
2275 | output_format = RawOutputFormat() | ||
2276 | |||
2277 | class ColoredOutputFormat(RawOutputFormat): | ||
2278 | """ | ||
2279 | Format colored string representations. | ||
2280 | """ | ||
2281 | def __init__(self): | ||
2282 | f = {'normal': '\033[0m', | ||
2283 | 'black': '\033[30m', | ||
2284 | 'red': '\033[31m', | ||
2285 | 'green': '\033[32m', | ||
2286 | 'yellow': '\033[33m', | ||
2287 | 'blue': '\033[34m', | ||
2288 | 'purple': '\033[35m', | ||
2289 | 'cyan': '\033[36m', | ||
2290 | 'bold': '\033[1m', | ||
2291 | 'uline': '\033[4m', | ||
2292 | 'blink': '\033[5m', | ||
2293 | 'invert': '\033[7m'} | ||
2294 | RawOutputFormat.__init__(self, f) | ||
2295 | |||
2296 | |||
2297 | def compatibility_mode(): | ||
2298 | """ | ||
2299 | Use this function to turn on the compatibility mode. The compatibility | ||
2300 | mode is used to improve compatibility with Pyinotify 0.7.1 (or older) | ||
2301 | programs. The compatibility mode provides additional variables 'is_dir', | ||
2302 | 'event_name', 'EventsCodes.IN_*' and 'EventsCodes.ALL_EVENTS' as | ||
2303 | Pyinotify 0.7.1 provided. Do not call this function from new programs, | ||
2304 | especially if they are developed for Pyinotify >= 0.8.x. | ||
2305 | """ | ||
2306 | setattr(EventsCodes, 'ALL_EVENTS', ALL_EVENTS) | ||
2307 | for evname in globals(): | ||
2308 | if evname.startswith('IN_'): | ||
2309 | setattr(EventsCodes, evname, globals()[evname]) | ||
2310 | global COMPATIBILITY_MODE | ||
2311 | COMPATIBILITY_MODE = True | ||
2312 | |||
2313 | |||
2314 | def command_line(): | ||
2315 | """ | ||
2316 | By default the watched path is '/tmp' and all types of events are | ||
2317 | monitored. Event monitoring runs forever; type ^C to stop it. | ||
2318 | """ | ||
2319 | from optparse import OptionParser | ||
2320 | |||
2321 | usage = "usage: %prog [options] [path1] [path2] [pathn]" | ||
2322 | |||
2323 | parser = OptionParser(usage=usage) | ||
2324 | parser.add_option("-v", "--verbose", action="store_true", | ||
2325 | dest="verbose", help="Verbose mode") | ||
2326 | parser.add_option("-r", "--recursive", action="store_true", | ||
2327 | dest="recursive", | ||
2328 | help="Add watches recursively on paths") | ||
2329 | parser.add_option("-a", "--auto_add", action="store_true", | ||
2330 | dest="auto_add", | ||
2331 | help="Automatically add watches on new directories") | ||
2332 | parser.add_option("-g", "--glob", action="store_true", | ||
2333 | dest="glob", | ||
2334 | help="Treat paths as globs") | ||
2335 | parser.add_option("-e", "--events-list", metavar="EVENT[,...]", | ||
2336 | dest="events_list", | ||
2337 | help=("A comma-separated list of events to watch for - " | ||
2338 | "see the documentation for valid options (defaults" | ||
2339 | " to everything)")) | ||
2340 | parser.add_option("-s", "--stats", action="store_true", | ||
2341 | dest="stats", | ||
2342 | help="Display dummy statistics") | ||
2343 | parser.add_option("-V", "--version", action="store_true", | ||
2344 | dest="version", help="Pyinotify version") | ||
2345 | parser.add_option("-f", "--raw-format", action="store_true", | ||
2346 | dest="raw_format", | ||
2347 | help="Disable enhanced output format.") | ||
2348 | parser.add_option("-c", "--command", action="store", | ||
2349 | dest="command", | ||
2350 | help="Shell command to run upon event") | ||
2351 | |||
2352 | (options, args) = parser.parse_args() | ||
2353 | |||
2354 | if options.verbose: | ||
2355 | log.setLevel(10) | ||
2356 | |||
2357 | if options.version: | ||
2358 | print(__version__) | ||
2359 | |||
2360 | if not options.raw_format: | ||
2361 | global output_format | ||
2362 | output_format = ColoredOutputFormat() | ||
2363 | |||
2364 | if len(args) < 1: | ||
2365 | path = '/tmp' # default watched path | ||
2366 | else: | ||
2367 | path = args | ||
2368 | |||
2369 | # watch manager instance | ||
2370 | wm = WatchManager() | ||
2371 | # notifier instance and init | ||
2372 | if options.stats: | ||
2373 | notifier = Notifier(wm, default_proc_fun=Stats(), read_freq=5) | ||
2374 | else: | ||
2375 | notifier = Notifier(wm, default_proc_fun=PrintAllEvents()) | ||
2376 | |||
2377 | # What mask to apply | ||
2378 | mask = 0 | ||
2379 | if options.events_list: | ||
2380 | events_list = options.events_list.split(',') | ||
2381 | for ev in events_list: | ||
2382 | evcode = EventsCodes.ALL_FLAGS.get(ev, 0) | ||
2383 | if evcode: | ||
2384 | mask |= evcode | ||
2385 | else: | ||
2386 | parser.error("The event '%s' specified with option -e" | ||
2387 | " is not valid" % ev) | ||
2388 | else: | ||
2389 | mask = ALL_EVENTS | ||
2390 | |||
2391 | # stats | ||
2392 | cb_fun = None | ||
2393 | if options.stats: | ||
2394 | def cb(s): | ||
2395 | sys.stdout.write(repr(s.proc_fun())) | ||
2396 | sys.stdout.write('\n') | ||
2397 | sys.stdout.write(str(s.proc_fun())) | ||
2398 | sys.stdout.write('\n') | ||
2399 | sys.stdout.flush() | ||
2400 | cb_fun = cb | ||
2401 | |||
2402 | # External command | ||
2403 | if options.command: | ||
2404 | def cb(s): | ||
2405 | subprocess.Popen(options.command, shell=True) | ||
2406 | cb_fun = cb | ||
2407 | |||
2408 | log.debug('Start monitoring %s, (press ^C to halt pyinotify)' % path) | ||
2409 | |||
2410 | wm.add_watch(path, mask, rec=options.recursive, auto_add=options.auto_add, do_glob=options.glob) | ||
2411 | # Loop forever (until sigint signal gets caught) | ||
2412 | notifier.loop(callback=cb_fun) | ||
2413 | |||
2414 | |||
2415 | if __name__ == '__main__': | ||
2416 | command_line() | ||