It seems that pycurl has been renamed to curl.

Signed-off-by: Jonas Eriksson

Upstream-Status: Pending

diff -uNrp urlgrabber-3.10.1.orig/urlgrabber/grabber.py urlgrabber-3.10.1/urlgrabber/grabber.py
--- urlgrabber-3.10.1.orig/urlgrabber/grabber.py	2014-03-04 17:08:52.345678844 +0100
+++ urlgrabber-3.10.1/urlgrabber/grabber.py	2014-03-04 17:09:49.074595399 +0100
@@ -88,7 +88,7 @@ GENERAL ARGUMENTS (kwargs)
     a positive integer expressing the number of seconds to wait before
     timing out attempts to connect to a server. If the value is None
     or 0, connection attempts will not time out. The timeout is passed
-    to the underlying pycurl object as its CONNECTTIMEOUT option, see
+    to the underlying curl object as its CONNECTTIMEOUT option, see
     the curl documentation on CURLOPT_CONNECTTIMEOUT for more information.
     http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUT
 
@@ -509,7 +509,7 @@ import mimetools
 import thread
 import types
 import stat
-import pycurl
+import curl
 from ftplib import parse150
 from StringIO import StringIO
 from httplib import HTTPException
@@ -821,7 +821,7 @@ class URLParser:
 
     def process_http(self, parts, url):
         (scheme, host, path, parm, query, frag) = parts
-        # TODO: auth-parsing here, maybe? pycurl doesn't really need it
+        # TODO: auth-parsing here, maybe? curl doesn't really need it
         return (scheme, host, path, parm, query, frag)
 
     def quote(self, parts):
@@ -983,7 +983,7 @@ class URLGrabberOptions:
         self.username = None
         self.password = None
         self.ssl_ca_cert = None # sets SSL_CAINFO - path to certdb
-        self.ssl_context = None # no-op in pycurl
+        self.ssl_context = None # no-op in curl
         self.ssl_verify_peer = True # check peer's cert for authenticityb
         self.ssl_verify_host = True # make sure who they are and who the cert is for matches
         self.ssl_key = None # client key
@@ -1355,7 +1355,7 @@ class PyCurlFileObject(object):
             return len(buf)
         except KeyboardInterrupt:
-            return pycurl.READFUNC_ABORT
+            return curl.READFUNC_ABORT
 
     def _return_hdr_obj(self):
         if self._parsed_hdr:
@@ -1370,7 +1370,7 @@ class PyCurlFileObject(object):
 
     hdr = property(_return_hdr_obj)
     http_code = property(fget=
-                 lambda self: self.curl_obj.getinfo(pycurl.RESPONSE_CODE))
+                 lambda self: self.curl_obj.getinfo(curl.RESPONSE_CODE))
 
     def _set_opts(self, opts={}):
         # XXX
@@ -1379,109 +1379,109 @@ class PyCurlFileObject(object):
 
         # keepalives
         if not opts.keepalive:
-            self.curl_obj.setopt(pycurl.FORBID_REUSE, 1)
+            self.curl_obj.setopt(curl.FORBID_REUSE, 1)
 
         # defaults we're always going to set
-        self.curl_obj.setopt(pycurl.NOPROGRESS, False)
-        self.curl_obj.setopt(pycurl.NOSIGNAL, True)
-        self.curl_obj.setopt(pycurl.WRITEFUNCTION, self._retrieve)
-        self.curl_obj.setopt(pycurl.HEADERFUNCTION, self._hdr_retrieve)
-        self.curl_obj.setopt(pycurl.PROGRESSFUNCTION, self._progress_update)
-        self.curl_obj.setopt(pycurl.FAILONERROR, True)
-        self.curl_obj.setopt(pycurl.OPT_FILETIME, True)
-        self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True)
+        self.curl_obj.setopt(curl.NOPROGRESS, False)
+        self.curl_obj.setopt(curl.NOSIGNAL, True)
+        self.curl_obj.setopt(curl.WRITEFUNCTION, self._retrieve)
+        self.curl_obj.setopt(curl.HEADERFUNCTION, self._hdr_retrieve)
+        self.curl_obj.setopt(curl.PROGRESSFUNCTION, self._progress_update)
+        self.curl_obj.setopt(curl.FAILONERROR, True)
+        self.curl_obj.setopt(curl.OPT_FILETIME, True)
+        self.curl_obj.setopt(curl.FOLLOWLOCATION, True)
 
         if DEBUG and DEBUG.level <= 10:
-            self.curl_obj.setopt(pycurl.VERBOSE, True)
+            self.curl_obj.setopt(curl.VERBOSE, True)
         if opts.user_agent:
-            self.curl_obj.setopt(pycurl.USERAGENT, opts.user_agent)
+            self.curl_obj.setopt(curl.USERAGENT, opts.user_agent)
         if opts.ip_resolve:
             # Default is: IPRESOLVE_WHATEVER
             ipr = opts.ip_resolve.lower()
             if ipr == 'whatever': # Do we need this?
-                self.curl_obj.setopt(pycurl.IPRESOLVE,pycurl.IPRESOLVE_WHATEVER)
+                self.curl_obj.setopt(curl.IPRESOLVE,curl.IPRESOLVE_WHATEVER)
             if ipr == 'ipv4':
-                self.curl_obj.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
+                self.curl_obj.setopt(curl.IPRESOLVE, curl.IPRESOLVE_V4)
             if ipr == 'ipv6':
-                self.curl_obj.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V6)
+                self.curl_obj.setopt(curl.IPRESOLVE, curl.IPRESOLVE_V6)
 
         # maybe to be options later
-        self.curl_obj.setopt(pycurl.FOLLOWLOCATION, True)
-        self.curl_obj.setopt(pycurl.MAXREDIRS, 5)
+        self.curl_obj.setopt(curl.FOLLOWLOCATION, True)
+        self.curl_obj.setopt(curl.MAXREDIRS, 5)
 
         # timeouts
         timeout = 300
         if hasattr(opts, 'timeout'):
             timeout = int(opts.timeout or 0)
-        self.curl_obj.setopt(pycurl.CONNECTTIMEOUT, timeout)
-        self.curl_obj.setopt(pycurl.LOW_SPEED_LIMIT, opts.minrate or 1000)
-        self.curl_obj.setopt(pycurl.LOW_SPEED_TIME, timeout)
+        self.curl_obj.setopt(curl.CONNECTTIMEOUT, timeout)
+        self.curl_obj.setopt(curl.LOW_SPEED_LIMIT, opts.minrate or 1000)
+        self.curl_obj.setopt(curl.LOW_SPEED_TIME, timeout)
 
         # ssl options
         if self.scheme == 'https':
             if opts.ssl_ca_cert: # this may do ZERO with nss according to curl docs
-                self.curl_obj.setopt(pycurl.CAPATH, opts.ssl_ca_cert)
-                self.curl_obj.setopt(pycurl.CAINFO, opts.ssl_ca_cert)
-            self.curl_obj.setopt(pycurl.SSL_VERIFYPEER, opts.ssl_verify_peer)
+                self.curl_obj.setopt(curl.CAPATH, opts.ssl_ca_cert)
+                self.curl_obj.setopt(curl.CAINFO, opts.ssl_ca_cert)
+            self.curl_obj.setopt(curl.SSL_VERIFYPEER, opts.ssl_verify_peer)
             if opts.ssl_verify_host: # 1 is meaningless to curl
-                self.curl_obj.setopt(pycurl.SSL_VERIFYHOST, 2)
+                self.curl_obj.setopt(curl.SSL_VERIFYHOST, 2)
             if opts.ssl_key:
-                self.curl_obj.setopt(pycurl.SSLKEY, opts.ssl_key)
+                self.curl_obj.setopt(curl.SSLKEY, opts.ssl_key)
             if opts.ssl_key_type:
-                self.curl_obj.setopt(pycurl.SSLKEYTYPE, opts.ssl_key_type)
+                self.curl_obj.setopt(curl.SSLKEYTYPE, opts.ssl_key_type)
            if opts.ssl_cert:
-                self.curl_obj.setopt(pycurl.SSLCERT, opts.ssl_cert)
+                self.curl_obj.setopt(curl.SSLCERT, opts.ssl_cert)
                 # if we have a client side cert - turn off reuse b/c nss is odd
-                self.curl_obj.setopt(pycurl.FORBID_REUSE, 1)
+                self.curl_obj.setopt(curl.FORBID_REUSE, 1)
             if opts.ssl_cert_type:
-                self.curl_obj.setopt(pycurl.SSLCERTTYPE, opts.ssl_cert_type)
+                self.curl_obj.setopt(curl.SSLCERTTYPE, opts.ssl_cert_type)
             if opts.ssl_key_pass:
-                self.curl_obj.setopt(pycurl.SSLKEYPASSWD, opts.ssl_key_pass)
+                self.curl_obj.setopt(curl.SSLKEYPASSWD, opts.ssl_key_pass)
 
         #headers:
         if opts.http_headers and self.scheme in ('http', 'https'):
             headers = []
             for (tag, content) in opts.http_headers:
                 headers.append('%s:%s' % (tag, content))
-            self.curl_obj.setopt(pycurl.HTTPHEADER, headers)
+            self.curl_obj.setopt(curl.HTTPHEADER, headers)
 
         # ranges:
         if opts.range or opts.reget:
             range_str = self._build_range()
             if range_str:
-                self.curl_obj.setopt(pycurl.RANGE, range_str)
+                self.curl_obj.setopt(curl.RANGE, range_str)
 
         # throttle/bandwidth
         if hasattr(opts, 'raw_throttle') and opts.raw_throttle():
-            self.curl_obj.setopt(pycurl.MAX_RECV_SPEED_LARGE, int(opts.raw_throttle()))
+            self.curl_obj.setopt(curl.MAX_RECV_SPEED_LARGE, int(opts.raw_throttle()))
 
         # proxy
         if opts.proxy is not None:
-            self.curl_obj.setopt(pycurl.PROXY, opts.proxy)
-            self.curl_obj.setopt(pycurl.PROXYAUTH,
+            self.curl_obj.setopt(curl.PROXY, opts.proxy)
+            self.curl_obj.setopt(curl.PROXYAUTH,
                 # All but Kerberos. BZ 769254
-                pycurl.HTTPAUTH_ANY - pycurl.HTTPAUTH_GSSNEGOTIATE)
+                curl.HTTPAUTH_ANY - curl.HTTPAUTH_GSSNEGOTIATE)
 
         if opts.username and opts.password:
             if self.scheme in ('http', 'https'):
-                self.curl_obj.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_ANY)
+                self.curl_obj.setopt(curl.HTTPAUTH, curl.HTTPAUTH_ANY)
 
             if opts.username and opts.password:
                 # apparently when applying them as curlopts they do not require quoting of any kind
                 userpwd = '%s:%s' % (opts.username, opts.password)
-                self.curl_obj.setopt(pycurl.USERPWD, userpwd)
+                self.curl_obj.setopt(curl.USERPWD, userpwd)
 
         #posts - simple - expects the fields as they are
         if opts.data:
-            self.curl_obj.setopt(pycurl.POST, True)
-            self.curl_obj.setopt(pycurl.POSTFIELDS, _to_utf8(opts.data))
+            self.curl_obj.setopt(curl.POST, True)
+            self.curl_obj.setopt(curl.POSTFIELDS, _to_utf8(opts.data))
 
         # ftp
         if opts.ftp_disable_epsv:
-            self.curl_obj.setopt(pycurl.FTP_USE_EPSV, False)
+            self.curl_obj.setopt(curl.FTP_USE_EPSV, False)
 
         # our url
-        self.curl_obj.setopt(pycurl.URL, self.url)
+        self.curl_obj.setopt(curl.URL, self.url)
 
 
     def _do_perform(self):
@@ -1490,7 +1490,7 @@ class PyCurlFileObject(object):
         try:
             self.curl_obj.perform()
-        except pycurl.error, e:
+        except curl.error, e:
             # XXX - break some of these out a bit more clearly
             # to other URLGrabErrors from
             # http://curl.haxx.se/libcurl/c/libcurl-errors.html
@@ -1505,11 +1505,11 @@ class PyCurlFileObject(object):
 
             if errcode == 23 and 200 <= code <= 299:
                 # this is probably wrong but ultimately this is what happens
-                # we have a legit http code and a pycurl 'writer failed' code
+                # we have a legit http code and a curl 'writer failed' code
                 # which almost always means something aborted it from outside
                 # since we cannot know what it is -I'm banking on it being
                 # a ctrl-c. XXXX - if there's a way of going back two raises to
-                # figure out what aborted the pycurl process FIXME
+                # figure out what aborted the curl process FIXME
                 raise getattr(self, '_cb_error', KeyboardInterrupt)
 
             elif errcode == 28:
@@ -1519,11 +1519,11 @@ class PyCurlFileObject(object):
 
             elif errcode == 42:
                 # this is probably wrong but ultimately this is what happens
-                # we have a legit http code and a pycurl 'writer failed' code
+                # we have a legit http code and a curl 'writer failed' code
                 # which almost always means something aborted it from outside
                 # since we cannot know what it is -I'm banking on it being
                 # a ctrl-c. XXXX - if there's a way of going back two raises to
-                # figure out what aborted the pycurl process FIXME
+                # figure out what aborted the curl process FIXME
                 raise KeyboardInterrupt
 
             else:
@@ -1750,7 +1750,7 @@ class PyCurlFileObject(object):
                 pass # URL too long. = IOError ... ignore everything.
 
             # set the time
-            mod_time = self.curl_obj.getinfo(pycurl.INFO_FILETIME)
+            mod_time = self.curl_obj.getinfo(curl.INFO_FILETIME)
             if mod_time != -1:
                 try:
                     os.utime(self.filename, (mod_time, mod_time))
@@ -1863,7 +1863,7 @@ class PyCurlFileObject(object):
             msg = _("Downloaded more than max size for %s: %s > %s") \
                   % (self.url, cur, max_size)
 
-            self._error = (pycurl.E_FILESIZE_EXCEEDED, msg)
+            self._error = (curl.E_FILESIZE_EXCEEDED, msg)
             return True
         return False
 
@@ -1903,16 +1903,16 @@ class PyCurlFileObject(object):
             urllib.addinfourl, via. urllib.URLopener.* """
         return self.url
 
-if hasattr(pycurl, 'GLOBAL_ACK_EINTR'):
+if hasattr(curl, 'GLOBAL_ACK_EINTR'):
     # fail immediately on ctrl-c
-    pycurl.global_init(pycurl.GLOBAL_DEFAULT | pycurl.GLOBAL_ACK_EINTR)
-_curl_cache = pycurl.Curl() # make one and reuse it over and over and over
+    curl.global_init(curl.GLOBAL_DEFAULT | curl.GLOBAL_ACK_EINTR)
+_curl_cache = curl.Curl() # make one and reuse it over and over and over
 
 def reset_curl_obj():
     """To make sure curl has reread the network/dns info we force a reload"""
     global _curl_cache
     _curl_cache.close()
-    _curl_cache = pycurl.Curl()
+    _curl_cache = curl.Curl()
 
 _libproxy_cache = None
 
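
Note, appended for illustration and not part of the change above: instead of
renaming every pycurl reference, grabber.py could keep the pycurl name and use
a small compatibility import. The sketch below is only an example under the
assumption that the renamed module exposes the same constants and Curl class
as pycurl; the helper function is hypothetical and does not exist in urlgrabber.

    # Sketch only, not applied by this patch.
    # Assumption: the libcurl binding provides the same API whether it is
    # installed as "pycurl" or as "curl".
    try:
        import pycurl
    except ImportError:
        import curl as pycurl  # fall back to the renamed module

    def make_handle(url):
        # Hypothetical helper: configure a handle roughly the way grabber.py
        # does, independent of which module name the binding was installed under.
        handle = pycurl.Curl()
        handle.setopt(pycurl.FOLLOWLOCATION, True)
        handle.setopt(pycurl.URL, url)
        return handle

With such a shim only the import line would need patching, at the cost of
keeping a second module name in sync if upstream changes the layout again.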