From 74d51c8c3e18a8b6738aff0c6f7ff706fd70eaa6 Mon Sep 17 00:00:00 2001
From: Adrian Dudau
Date: Thu, 26 Jun 2014 13:57:02 +0200
Subject: initial commit for Enea Linux 4.0

Migrated from the internal git server on the daisy-enea branch

Signed-off-by: Adrian Dudau
---
 .../python-urlgrabber/pycurl-curl.patch            | 288 +++++++++++++++++++++
 .../python-urlgrabber/python-urlgrabber_3.10.1.bb  |  37 +++
 2 files changed, 325 insertions(+)
 create mode 100644 recipes-extra/python-urlgrabber/python-urlgrabber/pycurl-curl.patch
 create mode 100644 recipes-extra/python-urlgrabber/python-urlgrabber_3.10.1.bb

diff --git a/recipes-extra/python-urlgrabber/python-urlgrabber/pycurl-curl.patch b/recipes-extra/python-urlgrabber/python-urlgrabber/pycurl-curl.patch
new file mode 100644
index 0000000..50f87e8
--- /dev/null
+++ b/recipes-extra/python-urlgrabber/python-urlgrabber/pycurl-curl.patch
@@ -0,0 +1,288 @@
+It seems that pycurl has been renamed to curl.
+
+Signed-off-by: Jonas Eriksson
+Upstream-Status: Pending
+diff -uNrp urlgrabber-3.10.1.orig/urlgrabber/grabber.py urlgrabber-3.10.1/urlgrabber/grabber.py
+--- urlgrabber-3.10.1.orig/urlgrabber/grabber.py 2014-03-04 17:08:52.345678844 +0100
++++ urlgrabber-3.10.1/urlgrabber/grabber.py 2014-03-04 17:09:49.074595399 +0100
+@@ -88,7 +88,7 @@ GENERAL ARGUMENTS (kwargs)
+ a positive integer expressing the number of seconds to wait before
+ timing out attempts to connect to a server. If the value is None
+ or 0, connection attempts will not time out. The timeout is passed
+- to the underlying pycurl object as its CONNECTTIMEOUT option, see
++ to the underlying curl object as its CONNECTTIMEOUT option, see
+ the curl documentation on CURLOPT_CONNECTTIMEOUT for more information.
+ http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUT
+
+@@ -509,7 +509,7 @@ import mimetools
+ import thread
+ import types
+ import stat
+-import pycurl
++import curl
+ from ftplib import parse150
+ from StringIO import StringIO
+ from httplib import HTTPException
+@@ -821,7 +821,7 @@ class URLParser:
+
+ def process_http(self, parts, url):
+ (scheme, host, path, parm, query, frag) = parts
+- # TODO: auth-parsing here, maybe? pycurl doesn't really need it
++ # TODO: auth-parsing here, maybe? curl doesn't really need it
+ return (scheme, host, path, parm, query, frag)
+
+ def quote(self, parts):
+@@ -983,7 +983,7 @@ class URLGrabberOptions:
+ self.username = None
+ self.password = None
+ self.ssl_ca_cert = None # sets SSL_CAINFO - path to certdb
+- self.ssl_context = None # no-op in pycurl
++ self.ssl_context = None # no-op in curl
+ self.ssl_verify_peer = True # check peer's cert for authenticityb
+ self.ssl_verify_host = True # make sure who they are and who the cert is for matches
+ self.ssl_key = None # client key
+@@ -1355,7 +1355,7 @@ class PyCurlFileObject(object):
+
+ return len(buf)
+ except KeyboardInterrupt:
+- return pycurl.READFUNC_ABORT
++ return curl.READFUNC_ABORT
+
+ def _return_hdr_obj(self):
+ if self._parsed_hdr:
+@@ -1370,7 +1370,7 @@ class PyCurlFileObject(object):
+
+ hdr = property(_return_hdr_obj)
+ http_code = property(fget=
+- lambda self: self.curl_obj.getinfo(pycurl.RESPONSE_CODE))
++ lambda self: self.curl_obj.getinfo(curl.RESPONSE_CODE))
+
+ def _set_opts(self, opts={}):
+ # XXX
+@@ -1379,109 +1379,109 @@ class PyCurlFileObject(object):
+
+ # keepalives
+ if not opts.keepalive:
+- self.curl_obj.setopt(pycurl.FORBID_REUSE, 1)
++ self.curl_obj.setopt(curl.FORBID_REUSE, 1)
+
+ # defaults we're always going to set
+- self.curl_obj.setopt(pycurl.NOPROGRESS, False)
+- self.curl_obj.setopt(pycurl.NOSIGNAL, True)
+- self.curl_obj.setopt(pycurl.WRITEFUNCTION, self._retrieve)
+- self.curl_obj.setopt(pycurl.HEADERFUNCTION, self._hdr_retrieve)
+- self.curl_obj.setopt(pycurl.PROGRESSFUNCTION, self._progress_update)
+- self.curl_obj.setopt(pycurl.FAILONERROR, True)
+- self.curl_obj.setopt(pycurl.OPT_FILETIME, True)
+- self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True)
++ self.curl_obj.setopt(curl.NOPROGRESS, False)
++ self.curl_obj.setopt(curl.NOSIGNAL, True)
++ self.curl_obj.setopt(curl.WRITEFUNCTION, self._retrieve)
++ self.curl_obj.setopt(curl.HEADERFUNCTION, self._hdr_retrieve)
++ self.curl_obj.setopt(curl.PROGRESSFUNCTION, self._progress_update)
++ self.curl_obj.setopt(curl.FAILONERROR, True)
++ self.curl_obj.setopt(curl.OPT_FILETIME, True)
++ self.curl_obj.setopt(curl.FOLLOWLOCATION, True)
+
+ if DEBUG and DEBUG.level <= 10:
+- self.curl_obj.setopt(pycurl.VERBOSE, True)
++ self.curl_obj.setopt(curl.VERBOSE, True)
+ if opts.user_agent:
+- self.curl_obj.setopt(pycurl.USERAGENT, opts.user_agent)
++ self.curl_obj.setopt(curl.USERAGENT, opts.user_agent)
+ if opts.ip_resolve:
+ # Default is: IPRESOLVE_WHATEVER
+ ipr = opts.ip_resolve.lower()
+ if ipr == 'whatever': # Do we need this?
+- self.curl_obj.setopt(pycurl.IPRESOLVE,pycurl.IPRESOLVE_WHATEVER)
++ self.curl_obj.setopt(curl.IPRESOLVE,curl.IPRESOLVE_WHATEVER)
+ if ipr == 'ipv4':
+- self.curl_obj.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
++ self.curl_obj.setopt(curl.IPRESOLVE, curl.IPRESOLVE_V4)
+ if ipr == 'ipv6':
+- self.curl_obj.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V6)
++ self.curl_obj.setopt(curl.IPRESOLVE, curl.IPRESOLVE_V6)
+
+ # maybe to be options later
+- self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True)
+- self.curl_obj.setopt(pycurl.MAXREDIRS, 5)
++ self.curl_obj.setopt(curl.FOLLOWLOCATION, True)
++ self.curl_obj.setopt(curl.MAXREDIRS, 5)
+
+ # timeouts
+ timeout = 300
+ if hasattr(opts, 'timeout'):
+ timeout = int(opts.timeout or 0)
+- self.curl_obj.setopt(pycurl.CONNECTTIMEOUT, timeout)
+- self.curl_obj.setopt(pycurl.LOW_SPEED_LIMIT, opts.minrate or 1000)
+- self.curl_obj.setopt(pycurl.LOW_SPEED_TIME, timeout)
++ self.curl_obj.setopt(curl.CONNECTTIMEOUT, timeout)
++ self.curl_obj.setopt(curl.LOW_SPEED_LIMIT, opts.minrate or 1000)
++ self.curl_obj.setopt(curl.LOW_SPEED_TIME, timeout)
+
+ # ssl options
+ if self.scheme == 'https':
+ if opts.ssl_ca_cert: # this may do ZERO with nss according to curl docs
+- self.curl_obj.setopt(pycurl.CAPATH, opts.ssl_ca_cert)
+- self.curl_obj.setopt(pycurl.CAINFO, opts.ssl_ca_cert)
+- self.curl_obj.setopt(pycurl.SSL_VERIFYPEER, opts.ssl_verify_peer)
++ self.curl_obj.setopt(curl.CAPATH, opts.ssl_ca_cert)
++ self.curl_obj.setopt(curl.CAINFO, opts.ssl_ca_cert)
++ self.curl_obj.setopt(curl.SSL_VERIFYPEER, opts.ssl_verify_peer)
+ if opts.ssl_verify_host: # 1 is meaningless to curl
+- self.curl_obj.setopt(pycurl.SSL_VERIFYHOST, 2)
++ self.curl_obj.setopt(curl.SSL_VERIFYHOST, 2)
+ if opts.ssl_key:
+- self.curl_obj.setopt(pycurl.SSLKEY, opts.ssl_key)
++ self.curl_obj.setopt(curl.SSLKEY, opts.ssl_key)
+ if opts.ssl_key_type:
+- self.curl_obj.setopt(pycurl.SSLKEYTYPE, opts.ssl_key_type)
++ self.curl_obj.setopt(curl.SSLKEYTYPE, opts.ssl_key_type)
+ if opts.ssl_cert:
+- self.curl_obj.setopt(pycurl.SSLCERT, opts.ssl_cert)
++ self.curl_obj.setopt(curl.SSLCERT, opts.ssl_cert)
+ # if we have a client side cert - turn off reuse b/c nss is odd
+- self.curl_obj.setopt(pycurl.FORBID_REUSE, 1)
++ self.curl_obj.setopt(curl.FORBID_REUSE, 1)
+ if opts.ssl_cert_type:
+- self.curl_obj.setopt(pycurl.SSLCERTTYPE, opts.ssl_cert_type)
++ self.curl_obj.setopt(curl.SSLCERTTYPE, opts.ssl_cert_type)
+ if opts.ssl_key_pass:
+- self.curl_obj.setopt(pycurl.SSLKEYPASSWD, opts.ssl_key_pass)
++ self.curl_obj.setopt(curl.SSLKEYPASSWD, opts.ssl_key_pass)
+
+ #headers:
+ if opts.http_headers and self.scheme in ('http', 'https'):
+ headers = []
+ for (tag, content) in opts.http_headers:
+ headers.append('%s:%s' % (tag, content))
+- self.curl_obj.setopt(pycurl.HTTPHEADER, headers)
++ self.curl_obj.setopt(curl.HTTPHEADER, headers)
+
+ # ranges:
+ if opts.range or opts.reget:
+ range_str = self._build_range()
+ if range_str:
+- self.curl_obj.setopt(pycurl.RANGE, range_str)
++ self.curl_obj.setopt(curl.RANGE, range_str)
+
+ # throttle/bandwidth
+ if hasattr(opts, 'raw_throttle') and opts.raw_throttle():
+- self.curl_obj.setopt(pycurl.MAX_RECV_SPEED_LARGE, int(opts.raw_throttle()))
++ self.curl_obj.setopt(curl.MAX_RECV_SPEED_LARGE, int(opts.raw_throttle()))
+
+ # proxy
+ if opts.proxy is not None:
+- self.curl_obj.setopt(pycurl.PROXY, opts.proxy)
+- self.curl_obj.setopt(pycurl.PROXYAUTH,
++ self.curl_obj.setopt(curl.PROXY, opts.proxy)
++ self.curl_obj.setopt(curl.PROXYAUTH,
+ # All but Kerberos. BZ 769254
+- pycurl.HTTPAUTH_ANY - pycurl.HTTPAUTH_GSSNEGOTIATE)
++ curl.HTTPAUTH_ANY - curl.HTTPAUTH_GSSNEGOTIATE)
+
+ if opts.username and opts.password:
+ if self.scheme in ('http', 'https'):
+- self.curl_obj.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_ANY)
++ self.curl_obj.setopt(curl.HTTPAUTH, curl.HTTPAUTH_ANY)
+
+ if opts.username and opts.password:
+ # apparently when applying them as curlopts they do not require quoting of any kind
+ userpwd = '%s:%s' % (opts.username, opts.password)
+- self.curl_obj.setopt(pycurl.USERPWD, userpwd)
++ self.curl_obj.setopt(curl.USERPWD, userpwd)
+
+ #posts - simple - expects the fields as they are
+ if opts.data:
+- self.curl_obj.setopt(pycurl.POST, True)
+- self.curl_obj.setopt(pycurl.POSTFIELDS, _to_utf8(opts.data))
++ self.curl_obj.setopt(curl.POST, True)
++ self.curl_obj.setopt(curl.POSTFIELDS, _to_utf8(opts.data))
+
+ # ftp
+ if opts.ftp_disable_epsv:
+- self.curl_obj.setopt(pycurl.FTP_USE_EPSV, False)
++ self.curl_obj.setopt(curl.FTP_USE_EPSV, False)
+
+ # our url
+- self.curl_obj.setopt(pycurl.URL, self.url)
++ self.curl_obj.setopt(curl.URL, self.url)
+
+
+ def _do_perform(self):
+@@ -1490,7 +1490,7 @@ class PyCurlFileObject(object):
+
+ try:
+ self.curl_obj.perform()
+- except pycurl.error, e:
++ except curl.error, e:
+ # XXX - break some of these out a bit more clearly
+ # to other URLGrabErrors from
+ # http://curl.haxx.se/libcurl/c/libcurl-errors.html
+@@ -1505,11 +1505,11 @@ class PyCurlFileObject(object):
+
+ if errcode == 23 and 200 <= code <= 299:
+ # this is probably wrong but ultimately this is what happens
+- # we have a legit http code and a pycurl 'writer failed' code
++ # we have a legit http code and a curl 'writer failed' code
+ # which almost always means something aborted it from outside
+ # since we cannot know what it is -I'm banking on it being
+ # a ctrl-c. XXXX - if there's a way of going back two raises to
+- # figure out what aborted the pycurl process FIXME
++ # figure out what aborted the curl process FIXME
+ raise getattr(self, '_cb_error', KeyboardInterrupt)
+
+ elif errcode == 28:
+@@ -1519,11 +1519,11 @@ class PyCurlFileObject(object):
+
+ elif errcode == 42:
+ # this is probably wrong but ultimately this is what happens
+- # we have a legit http code and a pycurl 'writer failed' code
++ # we have a legit http code and a curl 'writer failed' code
+ # which almost always means something aborted it from outside
+ # since we cannot know what it is -I'm banking on it being
+ # a ctrl-c. XXXX - if there's a way of going back two raises to
+- # figure out what aborted the pycurl process FIXME
++ # figure out what aborted the curl process FIXME
+ raise KeyboardInterrupt
+
+ else:
+@@ -1750,7 +1750,7 @@ class PyCurlFileObject(object):
+ pass # URL too long. = IOError ... ignore everything.
+
+ # set the time
+- mod_time = self.curl_obj.getinfo(pycurl.INFO_FILETIME)
++ mod_time = self.curl_obj.getinfo(curl.INFO_FILETIME)
+ if mod_time != -1:
+ try:
+ os.utime(self.filename, (mod_time, mod_time))
+@@ -1863,7 +1863,7 @@ class PyCurlFileObject(object):
+
+ msg = _("Downloaded more than max size for %s: %s > %s") \
+ % (self.url, cur, max_size)
+- self._error = (pycurl.E_FILESIZE_EXCEEDED, msg)
++ self._error = (curl.E_FILESIZE_EXCEEDED, msg)
+ return True
+ return False
+
+@@ -1903,16 +1903,16 @@ class PyCurlFileObject(object):
+ urllib.addinfourl, via. urllib.URLopener.* """
+ return self.url
+
+-if hasattr(pycurl, 'GLOBAL_ACK_EINTR'):
++if hasattr(curl, 'GLOBAL_ACK_EINTR'):
+ # fail immediately on ctrl-c
+- pycurl.global_init(pycurl.GLOBAL_DEFAULT | pycurl.GLOBAL_ACK_EINTR)
+-_curl_cache = pycurl.Curl() # make one and reuse it over and over and over
++ curl.global_init(curl.GLOBAL_DEFAULT | curl.GLOBAL_ACK_EINTR)
++_curl_cache = curl.Curl() # make one and reuse it over and over and over
+
+ def reset_curl_obj():
+ """To make sure curl has reread the network/dns info we force a reload"""
+ global _curl_cache
+ _curl_cache.close()
+- _curl_cache = pycurl.Curl()
++ _curl_cache = curl.Curl()
+
+ _libproxy_cache = None
+
diff --git a/recipes-extra/python-urlgrabber/python-urlgrabber_3.10.1.bb b/recipes-extra/python-urlgrabber/python-urlgrabber_3.10.1.bb
new file mode 100644
index 0000000..97dab20
--- /dev/null
+++ b/recipes-extra/python-urlgrabber/python-urlgrabber_3.10.1.bb
@@ -0,0 +1,37 @@
+DESCRIPTION = "urlgrabber is a pure python package that drastically simplifies the fetching of files."
+
+HOMEPAGE = "http://urlgrabber.baseurl.org/"
+SECTION = "devel/python"
+PRIORITY = "optional"
+LICENSE = "LGPL2.1+"
+PR = "r1"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=68ad62c64cc6c620126241fd429e68fe"
+
+SRC_URI = "http://urlgrabber.baseurl.org/download/urlgrabber-${PV}.tar.gz \
+ file://pycurl-curl.patch"
+
+S = "${WORKDIR}/urlgrabber-${PV}"
+
+SRC_URI[md5sum] = "1f5dc63805623cc473e06204fd240bb2"
+SRC_URI[sha256sum] = "06b13ff8d527dba3aee04069681b2c09c03117592d5485a80ae4b807cdf33476"
+
+RDEPENDS_${PN} = "python-pycurl"
+
+inherit distutils
+
+FILES_${PN} += "/usr/share/libexec"
+
+# setup.py imports the urlgrabber package, which fails at build time because
+# pycurl is not available in the native sysroot. The import is only needed to
+# read the version and description strings for the package metadata. Work
+# around this by generating a stripped-down copy of urlgrabber, without the
+# actual urlgrabber features, that setup.py can import safely.
+do_patch() {
+ # Create a non-importing version of urlgrabber for the setup script
+ mkdir ${S}/urlgrabber_version
+ sed 's/^from grabber import.*//' ${S}/urlgrabber/__init__.py > ${S}/urlgrabber_version/__init__.py
+
+ # Make sure the setup script uses the version-only urlgrabber
+ sed -i 's/import urlgrabber/&_version/' ${S}/setup.py
+}
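
A note on what pycurl-curl.patch above assumes: its header hedges ("It seems that
pycurl has been renamed to curl"), so the rename only holds on platforms that
actually ship the libcurl bindings under the module name "curl". Below is a
minimal sketch, not part of the commit, of a defensive import that tolerates
either module name; the fetch() helper is hypothetical, but the attributes it
uses (Curl, URL, WRITEFUNCTION, error) are part of pycurl's documented API, and
the Python 2 style matches grabber.py's own imports.

    # Hypothetical compatibility shim -- not part of pycurl-curl.patch.
    # Binds whichever module is present to the single name "curl", the
    # name grabber.py uses everywhere after the patch is applied.
    try:
        import curl                # renamed module assumed by the patch
    except ImportError:
        import pycurl as curl      # upstream module name

    from StringIO import StringIO  # Python 2, as in grabber.py

    def fetch(url):
        """Download url and return the response body as a string."""
        buf = StringIO()
        c = curl.Curl()
        c.setopt(curl.URL, url)
        c.setopt(curl.WRITEFUNCTION, buf.write)  # same callback style as grabber.py
        try:
            c.perform()
        except curl.error, e:      # same exception the patched code catches
            raise IOError(str(e))
        finally:
            c.close()
        return buf.getvalue()

Had grabber.py guarded its single import site this way, the remaining ~280
lines of the patch would reduce to mechanical references to one name.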
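
Similarly, the do_patch() task in the recipe above works because urlgrabber's
__init__.py only drags in pycurl through its "from grabber import ..." line;
everything else in that file is plain metadata. A sketch of the generated stub
follows, with illustrative values rather than strings quoted from the 3.10.1
tarball:

    # Sketch of ${S}/urlgrabber_version/__init__.py as produced by do_patch():
    # urlgrabber/__init__.py with the "from grabber import ..." line removed.
    # The metadata values below are illustrative, not copied from the tarball.
    __version__ = '3.10.1'
    __date__ = '2014/02/09'
    __author__ = 'Michael D. Stenner, Ryan Tomayko'
    __url__ = 'http://urlgrabber.baseurl.org/'

With the pycurl-dependent import gone, the stub imports cleanly in the native
build environment. Note that the sed substitution 's/import urlgrabber/&_version/'
also handles aliased forms: since '&' re-inserts the matched text, a line such
as "import urlgrabber as _ug" becomes "import urlgrabber_version as _ug", so
setup.py can still read the version string through whatever name it bound.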