Package googleapiclient :: Module http
[hide private]
[frames] | [no frames]

Source Code for Module googleapiclient.http

   1  # Copyright 2014 Google Inc. All Rights Reserved. 
   2  # 
   3  # Licensed under the Apache License, Version 2.0 (the "License"); 
   4  # you may not use this file except in compliance with the License. 
   5  # You may obtain a copy of the License at 
   6  # 
   7  #      http://www.apache.org/licenses/LICENSE-2.0 
   8  # 
   9  # Unless required by applicable law or agreed to in writing, software 
  10  # distributed under the License is distributed on an "AS IS" BASIS, 
  11  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
  12  # See the License for the specific language governing permissions and 
  13  # limitations under the License. 
  14   
  15  """Classes to encapsulate a single HTTP request. 
  16   
  17  The classes implement a command pattern, with every 
  18  object supporting an execute() method that does the 
  19  actual HTTP request. 
  20  """ 
  21  from __future__ import absolute_import 
  22  import six 
  23  from six.moves import http_client 
  24  from six.moves import range 
  25   
  26  __author__ = "jcgregorio@google.com (Joe Gregorio)" 
  27   
  28  from six import BytesIO, StringIO 
  29  from six.moves.urllib.parse import urlparse, urlunparse, quote, unquote 
  30   
  31  import base64 
  32  import copy 
  33  import gzip 
  34  import httplib2 
  35  import json 
  36  import logging 
  37  import mimetypes 
  38  import os 
  39  import random 
  40  import socket 
  41  import sys 
  42  import time 
  43  import uuid 
  44   
  45  # TODO(issue 221): Remove this conditional import jibbajabba. 
  46  try: 
  47      import ssl 
  48  except ImportError: 
  49      _ssl_SSLError = object() 
  50  else: 
  51      _ssl_SSLError = ssl.SSLError 
  52   
  53  from email.generator import Generator 
  54  from email.mime.multipart import MIMEMultipart 
  55  from email.mime.nonmultipart import MIMENonMultipart 
  56  from email.parser import FeedParser 
  57   
  58  from googleapiclient import _helpers as util 
  59   
  60  from googleapiclient import _auth 
  61  from googleapiclient.errors import BatchError 
  62  from googleapiclient.errors import HttpError 
  63  from googleapiclient.errors import InvalidChunkSizeError 
  64  from googleapiclient.errors import ResumableUploadError 
  65  from googleapiclient.errors import UnexpectedBodyError 
  66  from googleapiclient.errors import UnexpectedMethodError 
  67  from googleapiclient.model import JsonModel 
  68   
  69   
  70  LOGGER = logging.getLogger(__name__) 
  71   
  72  DEFAULT_CHUNK_SIZE = 100 * 1024 * 1024 
  73   
  74  MAX_URI_LENGTH = 2048 
  75   
  76  MAX_BATCH_LIMIT = 1000 
  77   
  78  _TOO_MANY_REQUESTS = 429 
  79   
  80  DEFAULT_HTTP_TIMEOUT_SEC = 60 
  81   
  82  _LEGACY_BATCH_URI = "https://www.googleapis.com/batch" 
  83   
  84  if six.PY2: 
  85      # That's a builtin python3 exception, nonexistent in python2. 
  86      # Defined to None to avoid NameError while trying to catch it 
  87      ConnectionError = None 
88 89 90 -def _should_retry_response(resp_status, content):
91 """Determines whether a response should be retried. 92 93 Args: 94 resp_status: The response status received. 95 content: The response content body. 96 97 Returns: 98 True if the response should be retried, otherwise False. 99 """ 100 # Retry on 5xx errors. 101 if resp_status >= 500: 102 return True 103 104 # Retry on 429 errors. 105 if resp_status == _TOO_MANY_REQUESTS: 106 return True 107 108 # For 403 errors, we have to check for the `reason` in the response to 109 # determine if we should retry. 110 if resp_status == six.moves.http_client.FORBIDDEN: 111 # If there's no details about the 403 type, don't retry. 112 if not content: 113 return False 114 115 # Content is in JSON format. 116 try: 117 data = json.loads(content.decode("utf-8")) 118 if isinstance(data, dict): 119 reason = data["error"]["errors"][0]["reason"] 120 else: 121 reason = data[0]["error"]["errors"]["reason"] 122 except (UnicodeDecodeError, ValueError, KeyError): 123 LOGGER.warning("Invalid JSON content from response: %s", content) 124 return False 125 126 LOGGER.warning('Encountered 403 Forbidden with reason "%s"', reason) 127 128 # Only retry on rate limit related failures. 129 if reason in ("userRateLimitExceeded", "rateLimitExceeded"): 130 return True 131 132 # Everything else is a success or non-retriable so break. 133 return False
134
def _retry_request(
    http, num_retries, req_type, sleep, rand, uri, method, *args, **kwargs
):
    """Retries an HTTP request multiple times while handling errors.

    If after all retries the request still fails, last error is either returned as
    return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).

    Args:
      http: Http object to be used to execute request.
      num_retries: Maximum number of retries.
      req_type: Type of the request (used for logging retries).
      sleep, rand: Functions to sleep for random time between retries.
      uri: URI to be requested.
      method: HTTP method to be used.
      args, kwargs: Additional arguments passed to http.request.

    Returns:
      resp, content - Response from the http request (may be HTTP 5xx).
    """
    resp = None
    content = None
    exception = None
    for retry_num in range(num_retries + 1):
        if retry_num > 0:
            # Randomized exponential backoff before every retry.
            sleep_time = rand() * 2 ** retry_num
            LOGGER.warning(
                "Sleeping %.2f seconds before retry %d of %d for %s: %s %s, after %s",
                sleep_time,
                retry_num,
                num_retries,
                req_type,
                method,
                uri,
                resp.status if resp else exception,
            )
            sleep(sleep_time)

        try:
            # Reset before each attempt so a stale error from the previous
            # iteration is not mistaken for a failure of this one.
            exception = None
            resp, content = http.request(uri, method, *args, **kwargs)
        # Retry on SSL errors and socket timeout errors.
        except _ssl_SSLError as ssl_error:
            exception = ssl_error
        except socket.timeout as socket_timeout:
            # It's important that this be before socket.error as it's a subclass
            # socket.timeout has no errorcode
            exception = socket_timeout
        except ConnectionError as connection_error:
            # Needs to be before socket.error as it's a subclass of
            # OSError (socket.error)
            exception = connection_error
        except socket.error as socket_error:
            # errno's contents differ by platform, so we have to match by name.
            # NOTE(review): socket.errno relies on the socket module's internal
            # errno import — confirm on non-CPython implementations.
            if socket.errno.errorcode.get(socket_error.errno) not in {
                "WSAETIMEDOUT",
                "ETIMEDOUT",
                "EPIPE",
                "ECONNABORTED",
            }:
                raise
            exception = socket_error
        except httplib2.ServerNotFoundError as server_not_found_error:
            exception = server_not_found_error

        if exception:
            if retry_num == num_retries:
                # Out of retries: surface the last transport-level error.
                raise exception
            else:
                continue

        # HTTP-level status decides whether another attempt is worthwhile.
        if not _should_retry_response(resp.status, content):
            break

    return resp, content
212
class MediaUploadProgress(object):
    """Snapshot of how far a resumable upload has progressed."""

    def __init__(self, resumable_progress, total_size):
        """Constructor.

        Args:
          resumable_progress: int, bytes sent so far.
          total_size: int, total bytes in complete upload, or None if the total
            upload size isn't known ahead of time.
        """
        self.resumable_progress = resumable_progress
        self.total_size = total_size

    def progress(self):
        """Fraction of the upload completed, as a float.

        Returns:
          the percentage complete as a float, returning 0.0 if the total size
          of the upload is unknown.
        """
        # An unknown (None) or zero total size cannot yield a meaningful ratio.
        if self.total_size is None or self.total_size == 0:
            return 0.0
        return float(self.resumable_progress) / float(self.total_size)
239
class MediaDownloadProgress(object):
    """Snapshot of how far a resumable download has progressed."""

    def __init__(self, resumable_progress, total_size):
        """Constructor.

        Args:
          resumable_progress: int, bytes received so far.
          total_size: int, total bytes in complete download.
        """
        self.resumable_progress = resumable_progress
        self.total_size = total_size

    def progress(self):
        """Fraction of the download completed, as a float.

        Returns:
          the percentage complete as a float, returning 0.0 if the total size
          of the download is unknown.
        """
        # An unknown (None) or zero total size cannot yield a meaningful ratio.
        if self.total_size is None or self.total_size == 0:
            return 0.0
        return float(self.resumable_progress) / float(self.total_size)
265
class MediaUpload(object):
    """Describes a media object to upload.

    Base class that defines the interface of MediaUpload subclasses.

    Subclasses may let you control the chunksize used when uploading a media
    object. Keeping the chunk as large as possible keeps the upload efficient;
    environments that place a hard time limit on individual HTTP requests
    (such as certain classes of requests under Google App Engine) may force a
    smaller chunk size.

    Streams are io.Base compatible objects that support seek(). Some
    MediaUpload subclasses can upload directly from such a stream; a subclass
    advertises this by returning True from has_stream(), in which case
    stream() must return an io.Base object that supports seek(). Where the
    underlying httplib supports streaming (for example Python 2.6 and later),
    the stream is passed into the http library, which reduces memory use and
    can speed up uploads.

    If none of the existing MediaUpload subclasses fit the media you need to
    upload, subclass MediaUpload for your particular needs.
    """

    def chunksize(self):
        """Chunk size for resumable uploads.

        Returns:
          Chunk size in bytes.
        """
        raise NotImplementedError()

    def mimetype(self):
        """Mime type of the body.

        Returns:
          Mime type.
        """
        return "application/octet-stream"

    def size(self):
        """Size of upload.

        Returns:
          Size of the body, or None if the size is unknown.
        """
        return None

    def resumable(self):
        """Whether this upload is resumable.

        Returns:
          True if resumable upload or False.
        """
        return False

    def getbytes(self, begin, end):
        """Get bytes from the media.

        Args:
          begin: int, offset from beginning of file.
          end: int, number of bytes to read, starting at begin.
            NOTE(review): despite the name, subclasses implement this as a
            byte count (length), not an absolute end offset — confirm before
            relying on either meaning.

        Returns:
          A string of bytes read. May be shorter than requested if EOF was
          reached first.
        """
        raise NotImplementedError()

    def has_stream(self):
        """Does the underlying upload support a streaming interface.

        Streaming means it is an io.IOBase subclass that supports seek, i.e.
        seekable() returns True.

        Returns:
          True if the call to stream() will return an instance of a seekable
          io.Base subclass.
        """
        return False

    def stream(self):
        """A stream interface to the data being uploaded.

        Returns:
          The returned value is an io.IOBase subclass that supports seek, i.e.
          seekable() returns True.
        """
        raise NotImplementedError()

    @util.positional(1)
    def _to_json(self, strip=None):
        """Create a JSON representation of this MediaUpload instance.

        Args:
          strip: array, An array of names of members to not include in the
            JSON.

        Returns:
          string, a JSON representation of this instance, suitable to pass to
          from_json().
        """
        cls = type(self)
        state = copy.copy(self.__dict__)
        if strip is not None:
            for member in strip:
                del state[member]
        # Record the concrete class so new_from_json() can reconstruct it.
        state["_class"] = cls.__name__
        state["_module"] = cls.__module__
        return json.dumps(state)

    def to_json(self):
        """Create a JSON representation of an instance of MediaUpload.

        Returns:
          string, a JSON representation of this instance, suitable to pass to
          from_json().
        """
        return self._to_json()

    @classmethod
    def new_from_json(cls, s):
        """Instantiate a MediaUpload subclass from a to_json() string.

        Args:
          s: string, JSON from to_json().

        Returns:
          An instance of the subclass of MediaUpload that was serialized with
          to_json().
        """
        data = json.loads(s)
        # Import the module that defined the serialized class, then delegate
        # to that class's own from_json() factory.
        module_name = data["_module"]
        module = __import__(module_name, fromlist=module_name.split(".")[:-1])
        target_class = getattr(module, data["_class"])
        return getattr(target_class, "from_json")(s)
408
class MediaIoBaseUpload(MediaUpload):
    """A MediaUpload for a io.Base objects.

    Note that the Python file object is compatible with io.Base and can be used
    with this class also.

      fh = BytesIO('...Some data to upload...')
      media = MediaIoBaseUpload(fh, mimetype='image/png',
                                chunksize=1024*1024, resumable=True)
      farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media).execute()

    You may pass -1 as the chunksize, which indicates that the entire file
    should be uploaded in a single request. On platforms that support streams,
    such as Python 2.6 or later, this is very efficient: it avoids multiple
    connections and does not load the entire file into memory before sending.
    Note that Google App Engine has a 5MB limit on request size, so there you
    should never set your chunksize larger than 5MB, or to -1.
    """

    @util.positional(3)
    def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
        """Constructor.

        Args:
          fd: io.Base or file object, The source of the bytes to upload. MUST be
            opened in blocking mode, do not use streams opened in non-blocking mode.
            The given stream must be seekable, that is, it must be able to call
            seek() on fd.
          mimetype: string, Mime-type of the file.
          chunksize: int, File will be uploaded in chunks of this many bytes. Only
            used if resumable=True. Pass in a value of -1 if the file is to be
            uploaded as a single chunk. Note that Google App Engine has a 5MB limit
            on request size, so you should never set your chunksize larger than 5MB,
            or to -1.
          resumable: bool, True if this is a resumable upload. False means upload
            in a single request.

        Raises:
          InvalidChunkSizeError: if chunksize is neither -1 nor positive.
        """
        super(MediaIoBaseUpload, self).__init__()
        self._fd = fd
        self._mimetype = mimetype
        if chunksize != -1 and chunksize <= 0:
            raise InvalidChunkSizeError()
        self._chunksize = chunksize
        self._resumable = resumable

        # Compute the total size once by seeking to the end of the stream.
        self._fd.seek(0, os.SEEK_END)
        self._size = self._fd.tell()

    def chunksize(self):
        """Chunk size for resumable uploads.

        Returns:
          Chunk size in bytes.
        """
        return self._chunksize

    def mimetype(self):
        """Mime type of the body.

        Returns:
          Mime type.
        """
        return self._mimetype

    def size(self):
        """Size of upload.

        Returns:
          Size of the body, or None if the size is unknown.
        """
        return self._size

    def resumable(self):
        """Whether this upload is resumable.

        Returns:
          True if resumable upload or False.
        """
        return self._resumable

    def getbytes(self, begin, length):
        """Get bytes from the media.

        Args:
          begin: int, offset from beginning of file.
          length: int, number of bytes to read, starting at begin.

        Returns:
          A string of bytes read. May be shorter than length if EOF was reached
          first.
        """
        self._fd.seek(begin)
        return self._fd.read(length)

    def has_stream(self):
        """Does the underlying upload support a streaming interface.

        Returns:
          True: this upload type always exposes its seekable source stream.
        """
        return True

    def stream(self):
        """A stream interface to the data being uploaded.

        Returns:
          The io.Base-compatible, seekable object the upload reads from.
        """
        return self._fd

    def to_json(self):
        """This upload type is not serializable."""
        raise NotImplementedError("MediaIoBaseUpload is not serializable.")
532
class MediaFileUpload(MediaIoBaseUpload):
    """A MediaUpload for a file.

    Construct a MediaFileUpload and pass as the media_body parameter of the
    method. For example, if we had a service that allowed uploading images:

      media = MediaFileUpload('cow.png', mimetype='image/png',
                              chunksize=1024*1024, resumable=True)
      farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media).execute()

    Depending on the platform you are working on, you may pass -1 as the
    chunksize, which indicates that the entire file should be uploaded in a single
    request. If the underlying platform supports streams, such as Python 2.6 or
    later, then this can be very efficient as it avoids multiple connections, and
    also avoids loading the entire file into memory before sending it. Note that
    Google App Engine has a 5MB limit on request size, so you should never set
    your chunksize larger than 5MB, or to -1.
    """

    @util.positional(2)
    def __init__(
        self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE, resumable=False
    ):
        """Constructor.

        Args:
          filename: string, Name of the file.
          mimetype: string, Mime-type of the file. If None then a mime-type will be
            guessed from the file extension.
          chunksize: int, File will be uploaded in chunks of this many bytes. Only
            used if resumable=True. Pass in a value of -1 if the file is to be
            uploaded in a single chunk. Note that Google App Engine has a 5MB limit
            on request size, so you should never set your chunksize larger than 5MB,
            or to -1.
          resumable: bool, True if this is a resumable upload. False means upload
            in a single request.
        """
        self._filename = filename
        fd = open(self._filename, "rb")
        if mimetype is None:
            # No mimetype provided, make a guess.
            mimetype, _ = mimetypes.guess_type(filename)
            if mimetype is None:
                # Guess failed, use octet-stream.
                mimetype = "application/octet-stream"
        super(MediaFileUpload, self).__init__(
            fd, mimetype, chunksize=chunksize, resumable=resumable
        )

    def __del__(self):
        # Close the file handle if __init__ got far enough to open it. The
        # getattr guard prevents a spurious AttributeError during garbage
        # collection when open() raised inside the constructor, in which case
        # self._fd was never assigned.
        fd = getattr(self, "_fd", None)
        if fd is not None:
            fd.close()

    def to_json(self):
        """Creating a JSON representation of an instance of MediaFileUpload.

        Returns:
          string, a JSON representation of this instance, suitable to pass to
          from_json().
        """
        # The open file handle cannot be serialized; from_json() reopens it
        # from the stored filename.
        return self._to_json(strip=["_fd"])

    @staticmethod
    def from_json(s):
        """Reconstruct a MediaFileUpload from a to_json() string."""
        d = json.loads(s)
        return MediaFileUpload(
            d["_filename"],
            mimetype=d["_mimetype"],
            chunksize=d["_chunksize"],
            resumable=d["_resumable"],
        )
607
class MediaInMemoryUpload(MediaIoBaseUpload):
    """MediaUpload for a chunk of bytes.

    DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
    the stream.
    """

    @util.positional(2)
    def __init__(
        self,
        body,
        mimetype="application/octet-stream",
        chunksize=DEFAULT_CHUNK_SIZE,
        resumable=False,
    ):
        """Create a new MediaInMemoryUpload.

        DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
        the stream.

        Args:
          body: string, Bytes of body content.
          mimetype: string, Mime-type of the file or default of
            'application/octet-stream'.
          chunksize: int, File will be uploaded in chunks of this many bytes. Only
            used if resumable=True.
          resumable: bool, True if this is a resumable upload. False means upload
            in a single request.
        """
        # Wrap the raw bytes in an in-memory stream and delegate all behavior
        # to MediaIoBaseUpload.
        super(MediaInMemoryUpload, self).__init__(
            BytesIO(body), mimetype, chunksize=chunksize, resumable=resumable
        )
642
class MediaIoBaseDownload(object):
    """Download media resources.

    Note that the Python file object is compatible with io.Base and can be used
    with this class also.

    Example:
      request = farms.animals().get_media(id='cow')
      fh = io.FileIO('cow.png', mode='wb')
      downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)

      done = False
      while done is False:
        status, done = downloader.next_chunk()
        if status:
          print "Download %d%%." % int(status.progress() * 100)
      print "Download Complete!"
    """

    @util.positional(3)
    def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
        """Constructor.

        Args:
          fd: io.Base or file object, The stream in which to write the downloaded
            bytes.
          request: googleapiclient.http.HttpRequest, the media request to perform in
            chunks.
          chunksize: int, File will be downloaded in chunks of this many bytes.
        """
        self._fd = fd
        self._request = request
        self._uri = request.uri
        self._chunksize = chunksize
        # Bytes successfully written to fd so far.
        self._progress = 0
        # Total media size, learned from response headers; None until known.
        self._total_size = None
        self._done = False

        # Stubs for testing.
        self._sleep = time.sleep
        self._rand = random.random

        self._headers = {}
        for k, v in six.iteritems(request.headers):
            # allow users to supply custom headers by setting them on the request
            # but strip out the ones that are set by default on requests generated by
            # API methods like Drive's files().get(fileId=...)
            if not k.lower() in ("accept", "accept-encoding", "user-agent"):
                self._headers[k] = v

    @util.positional(1)
    def next_chunk(self, num_retries=0):
        """Get the next chunk of the download.

        Args:
          num_retries: Integer, number of times to retry with randomized
            exponential backoff. If all retries fail, the raised HttpError
            represents the last request. If zero (default), we attempt the
            request only once.

        Returns:
          (status, done): (MediaDownloadProgress, boolean)
             The value of 'done' will be True when the media has been fully
             downloaded or the total size of the media is unknown.

        Raises:
          googleapiclient.errors.HttpError if the response was not a 2xx.
          httplib2.HttpLib2Error if a transport error has occurred.
        """
        headers = self._headers.copy()
        # Ask only for this chunk's byte range.
        headers["range"] = "bytes=%d-%d" % (
            self._progress,
            self._progress + self._chunksize,
        )
        http = self._request.http

        resp, content = _retry_request(
            http,
            num_retries,
            "media download",
            self._sleep,
            self._rand,
            self._uri,
            "GET",
            headers=headers,
        )

        if resp.status in [200, 206]:
            # Follow a server-provided relocation of the media for later chunks.
            if "content-location" in resp and resp["content-location"] != self._uri:
                self._uri = resp["content-location"]
            self._progress += len(content)
            self._fd.write(content)

            # Learn the total size from Content-Range ("bytes a-b/total") when
            # present, otherwise fall back to Content-Length.
            if "content-range" in resp:
                content_range = resp["content-range"]
                length = content_range.rsplit("/", 1)[1]
                self._total_size = int(length)
            elif "content-length" in resp:
                self._total_size = int(resp["content-length"])

            if self._total_size is None or self._progress == self._total_size:
                self._done = True
            return MediaDownloadProgress(self._progress, self._total_size), self._done
        else:
            raise HttpError(resp, content, uri=self._uri)
750
751 752 -class _StreamSlice(object):
753 """Truncated stream. 754 755 Takes a stream and presents a stream that is a slice of the original stream. 756 This is used when uploading media in chunks. In later versions of Python a 757 stream can be passed to httplib in place of the string of data to send. The 758 problem is that httplib just blindly reads to the end of the stream. This 759 wrapper presents a virtual stream that only reads to the end of the chunk. 760 """ 761
762 - def __init__(self, stream, begin, chunksize):
763 """Constructor. 764 765 Args: 766 stream: (io.Base, file object), the stream to wrap. 767 begin: int, the seek position the chunk begins at. 768 chunksize: int, the size of the chunk. 769 """ 770 self._stream = stream 771 self._begin = begin 772 self._chunksize = chunksize 773 self._stream.seek(begin)
774
775 - def read(self, n=-1):
776 """Read n bytes. 777 778 Args: 779 n, int, the number of bytes to read. 780 781 Returns: 782 A string of length 'n', or less if EOF is reached. 783 """ 784 # The data left available to read sits in [cur, end) 785 cur = self._stream.tell() 786 end = self._begin + self._chunksize 787 if n == -1 or cur + n > end: 788 n = end - cur 789 return self._stream.read(n)
790
791 792 -class HttpRequest(object):
793 """Encapsulates a single HTTP request.""" 794 795 @util.positional(4)
796 - def __init__( 797 self, 798 http, 799 postproc, 800 uri, 801 method="GET", 802 body=None, 803 headers=None, 804 methodId=None, 805 resumable=None, 806 ):
807 """Constructor for an HttpRequest. 808 809 Args: 810 http: httplib2.Http, the transport object to use to make a request 811 postproc: callable, called on the HTTP response and content to transform 812 it into a data object before returning, or raising an exception 813 on an error. 814 uri: string, the absolute URI to send the request to 815 method: string, the HTTP method to use 816 body: string, the request body of the HTTP request, 817 headers: dict, the HTTP request headers 818 methodId: string, a unique identifier for the API method being called. 819 resumable: MediaUpload, None if this is not a resumbale request. 820 """ 821 self.uri = uri 822 self.method = method 823 self.body = body 824 self.headers = headers or {} 825 self.methodId = methodId 826 self.http = http 827 self.postproc = postproc 828 self.resumable = resumable 829 self.response_callbacks = [] 830 self._in_error_state = False 831 832 # The size of the non-media part of the request. 833 self.body_size = len(self.body or "") 834 835 # The resumable URI to send chunks to. 836 self.resumable_uri = None 837 838 # The bytes that have been uploaded. 839 self.resumable_progress = 0 840 841 # Stubs for testing. 842 self._rand = random.random 843 self._sleep = time.sleep
844 845 @util.positional(1)
846 - def execute(self, http=None, num_retries=0):
847 """Execute the request. 848 849 Args: 850 http: httplib2.Http, an http object to be used in place of the 851 one the HttpRequest request object was constructed with. 852 num_retries: Integer, number of times to retry with randomized 853 exponential backoff. If all retries fail, the raised HttpError 854 represents the last request. If zero (default), we attempt the 855 request only once. 856 857 Returns: 858 A deserialized object model of the response body as determined 859 by the postproc. 860 861 Raises: 862 googleapiclient.errors.HttpError if the response was not a 2xx. 863 httplib2.HttpLib2Error if a transport error has occurred. 864 """ 865 if http is None: 866 http = self.http 867 868 if self.resumable: 869 body = None 870 while body is None: 871 _, body = self.next_chunk(http=http, num_retries=num_retries) 872 return body 873 874 # Non-resumable case. 875 876 if "content-length" not in self.headers: 877 self.headers["content-length"] = str(self.body_size) 878 # If the request URI is too long then turn it into a POST request. 879 # Assume that a GET request never contains a request body. 880 if len(self.uri) > MAX_URI_LENGTH and self.method == "GET": 881 self.method = "POST" 882 self.headers["x-http-method-override"] = "GET" 883 self.headers["content-type"] = "application/x-www-form-urlencoded" 884 parsed = urlparse(self.uri) 885 self.uri = urlunparse( 886 (parsed.scheme, parsed.netloc, parsed.path, parsed.params, None, None) 887 ) 888 self.body = parsed.query 889 self.headers["content-length"] = str(len(self.body)) 890 891 # Handle retries for server-side errors. 892 resp, content = _retry_request( 893 http, 894 num_retries, 895 "request", 896 self._sleep, 897 self._rand, 898 str(self.uri), 899 method=str(self.method), 900 body=self.body, 901 headers=self.headers, 902 ) 903 904 for callback in self.response_callbacks: 905 callback(resp) 906 if resp.status >= 300: 907 raise HttpError(resp, content, uri=self.uri) 908 return self.postproc(resp, content)
909 910 @util.positional(2)
911 - def add_response_callback(self, cb):
912 """add_response_headers_callback 913 914 Args: 915 cb: Callback to be called on receiving the response headers, of signature: 916 917 def cb(resp): 918 # Where resp is an instance of httplib2.Response 919 """ 920 self.response_callbacks.append(cb)
921 922 @util.positional(1)
923 - def next_chunk(self, http=None, num_retries=0):
924 """Execute the next step of a resumable upload. 925 926 Can only be used if the method being executed supports media uploads and 927 the MediaUpload object passed in was flagged as using resumable upload. 928 929 Example: 930 931 media = MediaFileUpload('cow.png', mimetype='image/png', 932 chunksize=1000, resumable=True) 933 request = farm.animals().insert( 934 id='cow', 935 name='cow.png', 936 media_body=media) 937 938 response = None 939 while response is None: 940 status, response = request.next_chunk() 941 if status: 942 print "Upload %d%% complete." % int(status.progress() * 100) 943 944 945 Args: 946 http: httplib2.Http, an http object to be used in place of the 947 one the HttpRequest request object was constructed with. 948 num_retries: Integer, number of times to retry with randomized 949 exponential backoff. If all retries fail, the raised HttpError 950 represents the last request. If zero (default), we attempt the 951 request only once. 952 953 Returns: 954 (status, body): (ResumableMediaStatus, object) 955 The body will be None until the resumable media is fully uploaded. 956 957 Raises: 958 googleapiclient.errors.HttpError if the response was not a 2xx. 959 httplib2.HttpLib2Error if a transport error has occurred. 
960 """ 961 if http is None: 962 http = self.http 963 964 if self.resumable.size() is None: 965 size = "*" 966 else: 967 size = str(self.resumable.size()) 968 969 if self.resumable_uri is None: 970 start_headers = copy.copy(self.headers) 971 start_headers["X-Upload-Content-Type"] = self.resumable.mimetype() 972 if size != "*": 973 start_headers["X-Upload-Content-Length"] = size 974 start_headers["content-length"] = str(self.body_size) 975 976 resp, content = _retry_request( 977 http, 978 num_retries, 979 "resumable URI request", 980 self._sleep, 981 self._rand, 982 self.uri, 983 method=self.method, 984 body=self.body, 985 headers=start_headers, 986 ) 987 988 if resp.status == 200 and "location" in resp: 989 self.resumable_uri = resp["location"] 990 else: 991 raise ResumableUploadError(resp, content) 992 elif self._in_error_state: 993 # If we are in an error state then query the server for current state of 994 # the upload by sending an empty PUT and reading the 'range' header in 995 # the response. 996 headers = {"Content-Range": "bytes */%s" % size, "content-length": "0"} 997 resp, content = http.request(self.resumable_uri, "PUT", headers=headers) 998 status, body = self._process_response(resp, content) 999 if body: 1000 # The upload was complete. 1001 return (status, body) 1002 1003 if self.resumable.has_stream(): 1004 data = self.resumable.stream() 1005 if self.resumable.chunksize() == -1: 1006 data.seek(self.resumable_progress) 1007 chunk_end = self.resumable.size() - self.resumable_progress - 1 1008 else: 1009 # Doing chunking with a stream, so wrap a slice of the stream. 
1010 data = _StreamSlice( 1011 data, self.resumable_progress, self.resumable.chunksize() 1012 ) 1013 chunk_end = min( 1014 self.resumable_progress + self.resumable.chunksize() - 1, 1015 self.resumable.size() - 1, 1016 ) 1017 else: 1018 data = self.resumable.getbytes( 1019 self.resumable_progress, self.resumable.chunksize() 1020 ) 1021 1022 # A short read implies that we are at EOF, so finish the upload. 1023 if len(data) < self.resumable.chunksize(): 1024 size = str(self.resumable_progress + len(data)) 1025 1026 chunk_end = self.resumable_progress + len(data) - 1 1027 1028 headers = { 1029 "Content-Range": "bytes %d-%d/%s" 1030 % (self.resumable_progress, chunk_end, size), 1031 # Must set the content-length header here because httplib can't 1032 # calculate the size when working with _StreamSlice. 1033 "Content-Length": str(chunk_end - self.resumable_progress + 1), 1034 } 1035 1036 for retry_num in range(num_retries + 1): 1037 if retry_num > 0: 1038 self._sleep(self._rand() * 2 ** retry_num) 1039 LOGGER.warning( 1040 "Retry #%d for media upload: %s %s, following status: %d" 1041 % (retry_num, self.method, self.uri, resp.status) 1042 ) 1043 1044 try: 1045 resp, content = http.request( 1046 self.resumable_uri, method="PUT", body=data, headers=headers 1047 ) 1048 except: 1049 self._in_error_state = True 1050 raise 1051 if not _should_retry_response(resp.status, content): 1052 break 1053 1054 return self._process_response(resp, content)
1055
def _process_response(self, resp, content):
    """Process the response from a single chunk upload.

    Args:
      resp: httplib2.Response, the response object.
      content: string, the content of the response.

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      googleapiclient.errors.HttpError if the response was not a 2xx or a 308.
    """
    status = resp.status

    if status in (200, 201):
        # The upload is complete; hand the payload to the postprocessor.
        self._in_error_state = False
        return None, self.postproc(resp, content)

    if status != 308:
        # Anything other than success or "308 Resume Incomplete" is fatal.
        self._in_error_state = True
        raise HttpError(resp, content, uri=self.uri)

    # A "308 Resume Incomplete" indicates we are not done; record how far
    # the server says it has gotten.
    self._in_error_state = False
    try:
        self.resumable_progress = int(resp["range"].split("-")[1]) + 1
    except KeyError:
        # If resp doesn't contain range header, resumable progress is 0.
        self.resumable_progress = 0
    if "location" in resp:
        self.resumable_uri = resp["location"]

    return (
        MediaUploadProgress(self.resumable_progress, self.resumable.size()),
        None,
    )
1091
def to_json(self):
    """Returns a JSON representation of the HttpRequest."""
    state = copy.copy(self.__dict__)
    if state["resumable"] is not None:
        state["resumable"] = self.resumable.to_json()
    # These members are not JSON-serializable; drop them from the snapshot.
    for key in ("http", "postproc", "_sleep", "_rand"):
        del state[key]

    return json.dumps(state)
@staticmethod
def from_json(s, http, postproc):
    """Returns an HttpRequest populated with info from a JSON object."""
    info = json.loads(s)
    resumable = info["resumable"]
    if resumable is not None:
        resumable = MediaUpload.new_from_json(resumable)
    return HttpRequest(
        http,
        postproc,
        uri=info["uri"],
        method=info["method"],
        body=info["body"],
        headers=info["headers"],
        methodId=info["methodId"],
        resumable=resumable,
    )
1120
class BatchHttpRequest(object):
    """Batches multiple HttpRequest objects into a single HTTP request.

    Example:
      from googleapiclient.http import BatchHttpRequest

      def list_animals(request_id, response, exception):
        \"\"\"Do something with the animals list response.\"\"\"
        if exception is not None:
          # Do something with the exception.
          pass
        else:
          # Do something with the response.
          pass

      def list_farmers(request_id, response, exception):
        \"\"\"Do something with the farmers list response.\"\"\"
        if exception is not None:
          # Do something with the exception.
          pass
        else:
          # Do something with the response.
          pass

      service = build('farm', 'v2')

      batch = BatchHttpRequest()

      batch.add(service.animals().list(), list_animals)
      batch.add(service.farmers().list(), list_farmers)
      batch.execute(http=http)
    """

    @util.positional(1)
    def __init__(self, callback=None, batch_uri=None):
        """Constructor for a BatchHttpRequest.

        Args:
          callback: callable, A callback to be called for each response, of the
            form callback(id, response, exception). The first parameter is the
            request id, and the second is the deserialized response object. The
            third is an googleapiclient.errors.HttpError exception object if an
            HTTP error occurred while processing the request, or None if no
            error occurred.
          batch_uri: string, URI to send batch requests to.
        """
        if batch_uri is None:
            batch_uri = _LEGACY_BATCH_URI

        if batch_uri == _LEGACY_BATCH_URI:
            # Logger.warn is a deprecated alias of warning(); also note the
            # space between the two URLs so the message does not render as
            # "...-and.htmland https://...".
            LOGGER.warning(
                "You have constructed a BatchHttpRequest using the legacy batch "
                "endpoint %s. This endpoint will be turned down on March 25, 2019. "
                "Please provide the API-specific endpoint or use "
                "service.new_batch_http_request(). For more details see "
                "https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html "
                "and https://developers.google.com/api-client-library/python/guide/batch.",
                _LEGACY_BATCH_URI,
            )
        self._batch_uri = batch_uri

        # Global callback to be called for each individual response in the batch.
        self._callback = callback

        # A map from id to request.
        self._requests = {}

        # A map from id to callback.
        self._callbacks = {}

        # List of request ids, in the order in which they were added.
        self._order = []

        # The last auto generated id.
        self._last_auto_id = 0

        # Unique ID on which to base the Content-ID headers.
        self._base_id = None

        # A map from request id to (httplib2.Response, content) response pairs
        self._responses = {}

        # A map of id(Credentials) that have been refreshed.
        self._refreshed_credentials = {}

    def _refresh_and_apply_credentials(self, request, http):
        """Refresh the credentials and apply to the request.

        Args:
          request: HttpRequest, the request.
          http: httplib2.Http, the global http object for the batch.
        """
        # For the credentials to refresh, but only once per refresh_token
        # If there is no http per the request then refresh the http passed in
        # via execute()
        creds = None
        request_credentials = False

        if request.http is not None:
            creds = _auth.get_credentials_from_http(request.http)
            request_credentials = True

        if creds is None and http is not None:
            creds = _auth.get_credentials_from_http(http)

        if creds is not None:
            if id(creds) not in self._refreshed_credentials:
                _auth.refresh_credentials(creds)
                self._refreshed_credentials[id(creds)] = 1

            # Only apply the credentials if we are using the http object passed
            # in, otherwise apply() will get called during _serialize_request().
            if request.http is None or not request_credentials:
                _auth.apply_credentials(creds, request.headers)

    def _id_to_header(self, id_):
        """Convert an id to a Content-ID header value.

        Args:
          id_: string, identifier of individual request.

        Returns:
          A Content-ID header with the id_ encoded into it. A UUID is prepended
          to the value because Content-ID headers are supposed to be universally
          unique.
        """
        if self._base_id is None:
            self._base_id = uuid.uuid4()

        # NB: we intentionally leave whitespace between base/id and '+', so
        # RFC2822 line folding works properly on Python 3; see
        # https://github.com/google/google-api-python-client/issues/164
        return "<%s + %s>" % (self._base_id, quote(id_))

    def _header_to_id(self, header):
        """Convert a Content-ID header value to an id.

        Presumes the Content-ID header conforms to the format that
        _id_to_header() returns.

        Args:
          header: string, Content-ID header value.

        Returns:
          The extracted id value.

        Raises:
          BatchError if the header is not in the expected format.
        """
        if header[0] != "<" or header[-1] != ">":
            raise BatchError("Invalid value for Content-ID: %s" % header)
        if "+" not in header:
            raise BatchError("Invalid value for Content-ID: %s" % header)
        # The base part (before " + ") is the batch-wide UUID; only the
        # per-request id is needed here.
        _, id_ = header[1:-1].split(" + ", 1)

        return unquote(id_)

    def _serialize_request(self, request):
        """Convert an HttpRequest object into a string.

        Args:
          request: HttpRequest, the request to serialize.

        Returns:
          The request as a string in application/http format.
        """
        # Construct status line
        parsed = urlparse(request.uri)
        request_line = urlunparse(
            ("", "", parsed.path, parsed.params, parsed.query, "")
        )
        status_line = request.method + " " + request_line + " HTTP/1.1\n"
        major, minor = request.headers.get("content-type", "application/json").split(
            "/"
        )
        msg = MIMENonMultipart(major, minor)
        headers = request.headers.copy()

        if request.http is not None:
            credentials = _auth.get_credentials_from_http(request.http)
            if credentials is not None:
                _auth.apply_credentials(credentials, headers)

        # MIMENonMultipart adds its own Content-Type header.
        if "content-type" in headers:
            del headers["content-type"]

        for key, value in six.iteritems(headers):
            msg[key] = value
        msg["Host"] = parsed.netloc
        msg.set_unixfrom(None)

        if request.body is not None:
            msg.set_payload(request.body)
            msg["content-length"] = str(len(request.body))

        # Serialize the mime message.
        fp = StringIO()
        # maxheaderlen=0 means don't line wrap headers.
        g = Generator(fp, maxheaderlen=0)
        g.flatten(msg, unixfrom=False)
        body = fp.getvalue()

        return status_line + body

    def _deserialize_response(self, payload):
        """Convert string into httplib2 response and content.

        Args:
          payload: string, headers and body as a string.

        Returns:
          A pair (resp, content), such as would be returned from httplib2.request.
        """
        # Strip off the status line
        status_line, payload = payload.split("\n", 1)
        protocol, status, reason = status_line.split(" ", 2)

        # Parse the rest of the response
        parser = FeedParser()
        parser.feed(payload)
        msg = parser.close()
        msg["status"] = status

        # Create httplib2.Response from the parsed headers.
        resp = httplib2.Response(msg)
        resp.reason = reason
        resp.version = int(protocol.split("/", 1)[1].replace(".", ""))

        content = payload.split("\r\n\r\n", 1)[1]

        return resp, content

    def _new_id(self):
        """Create a new id.

        Auto incrementing number that avoids conflicts with ids already used.

        Returns:
          string, a new unique id.
        """
        self._last_auto_id += 1
        while str(self._last_auto_id) in self._requests:
            self._last_auto_id += 1
        return str(self._last_auto_id)

    @util.positional(2)
    def add(self, request, callback=None, request_id=None):
        """Add a new request.

        Every callback added will be paired with a unique id, the request_id.
        That unique id will be passed back to the callback when the response
        comes back from the server. The default behavior is to have the library
        generate its own unique id. If the caller passes in a request_id then
        they must ensure uniqueness for each request_id, and if they are not an
        exception is raised. Callers should either supply all request_ids or
        never supply a request id, to avoid such an error.

        Args:
          request: HttpRequest, Request to add to the batch.
          callback: callable, A callback to be called for this response, of the
            form callback(id, response, exception). The first parameter is the
            request id, and the second is the deserialized response object. The
            third is an googleapiclient.errors.HttpError exception object if an
            HTTP error occurred while processing the request, or None if no
            errors occurred.
          request_id: string, A unique id for the request. The id will be passed
            to the callback with the response.

        Returns:
          None

        Raises:
          BatchError if a media request is added to a batch.
          KeyError if the request_id is not unique.
        """

        if len(self._order) >= MAX_BATCH_LIMIT:
            raise BatchError(
                "Exceeded the maximum calls(%d) in a single batch request."
                % MAX_BATCH_LIMIT
            )
        if request_id is None:
            request_id = self._new_id()
        if request.resumable is not None:
            raise BatchError("Media requests cannot be used in a batch request.")
        if request_id in self._requests:
            raise KeyError("A request with this ID already exists: %s" % request_id)
        self._requests[request_id] = request
        self._callbacks[request_id] = callback
        self._order.append(request_id)

    def _execute(self, http, order, requests):
        """Serialize batch request, send to server, process response.

        Args:
          http: httplib2.Http, an http object to be used to make the request with.
          order: list, list of request ids in the order they were added to the
            batch.
          requests: list, list of request objects to send.

        Raises:
          httplib2.HttpLib2Error if a transport error has occurred.
          googleapiclient.errors.BatchError if the response is the wrong format.
        """
        message = MIMEMultipart("mixed")
        # Message should not write out its own headers.
        setattr(message, "_write_headers", lambda self: None)

        # Add all the individual requests.
        for request_id in order:
            request = requests[request_id]

            msg = MIMENonMultipart("application", "http")
            msg["Content-Transfer-Encoding"] = "binary"
            msg["Content-ID"] = self._id_to_header(request_id)

            body = self._serialize_request(request)
            msg.set_payload(body)
            message.attach(msg)

        # encode the body: note that we can't use `as_string`, because
        # it plays games with `From ` lines.
        fp = StringIO()
        g = Generator(fp, mangle_from_=False)
        g.flatten(message, unixfrom=False)
        body = fp.getvalue()

        headers = {}
        headers["content-type"] = (
            "multipart/mixed; " 'boundary="%s"'
        ) % message.get_boundary()

        resp, content = http.request(
            self._batch_uri, method="POST", body=body, headers=headers
        )

        if resp.status >= 300:
            raise HttpError(resp, content, uri=self._batch_uri)

        # Prepend with a content-type header so FeedParser can handle it.
        header = "content-type: %s\r\n\r\n" % resp["content-type"]
        # PY3's FeedParser only accepts unicode. So we should decode content
        # here, and encode each payload again.
        if six.PY3:
            content = content.decode("utf-8")
        for_parser = header + content

        parser = FeedParser()
        parser.feed(for_parser)
        mime_response = parser.close()

        if not mime_response.is_multipart():
            raise BatchError(
                "Response not in multipart/mixed format.", resp=resp, content=content
            )

        for part in mime_response.get_payload():
            request_id = self._header_to_id(part["Content-ID"])
            response, content = self._deserialize_response(part.get_payload())
            # We encode content here to emulate normal http response.
            if isinstance(content, six.text_type):
                content = content.encode("utf-8")
            self._responses[request_id] = (response, content)

    @util.positional(1)
    def execute(self, http=None):
        """Execute all the requests as a single batched HTTP request.

        Args:
          http: httplib2.Http, an http object to be used in place of the one the
            HttpRequest request object was constructed with. If one isn't
            supplied then use a http object from the requests in this batch.

        Returns:
          None

        Raises:
          httplib2.HttpLib2Error if a transport error has occurred.
          googleapiclient.errors.BatchError if the response is the wrong format.
        """
        # If we have no requests return
        if len(self._order) == 0:
            return None

        # If http is not supplied use the first valid one given in the requests.
        if http is None:
            for request_id in self._order:
                request = self._requests[request_id]
                if request is not None:
                    http = request.http
                    break

        if http is None:
            raise ValueError("Missing a valid http object.")

        # Special case for OAuth2Credentials-style objects which have not yet
        # been refreshed with an initial access_token.
        creds = _auth.get_credentials_from_http(http)
        if creds is not None:
            if not _auth.is_valid(creds):
                LOGGER.info("Attempting refresh to obtain initial access_token")
                _auth.refresh_credentials(creds)

        self._execute(http, self._order, self._requests)

        # Loop over all the requests and check for 401s. For each 401 request
        # the credentials should be refreshed and then sent again in a separate
        # batch.
        redo_requests = {}
        redo_order = []

        for request_id in self._order:
            resp, content = self._responses[request_id]
            if resp["status"] == "401":
                redo_order.append(request_id)
                request = self._requests[request_id]
                self._refresh_and_apply_credentials(request, http)
                redo_requests[request_id] = request

        if redo_requests:
            self._execute(http, redo_order, redo_requests)

        # Now process all callbacks that are erroring, and raise an exception for
        # ones that return a non-2xx response? Or add extra parameter to callback
        # that contains an HttpError?

        for request_id in self._order:
            resp, content = self._responses[request_id]

            request = self._requests[request_id]
            callback = self._callbacks[request_id]

            response = None
            exception = None
            try:
                if resp.status >= 300:
                    raise HttpError(resp, content, uri=request.uri)
                response = request.postproc(resp, content)
            except HttpError as e:
                exception = e

            if callback is not None:
                callback(request_id, response, exception)
            if self._callback is not None:
                self._callback(request_id, response, exception)
1565
class HttpRequestMock(object):
    """Mock of HttpRequest.

    Do not construct directly, instead use RequestMockBuilder.
    """

    def __init__(self, resp, content, postproc):
        """Constructor for HttpRequestMock

        Args:
          resp: httplib2.Response, the response to emulate coming from the request
          content: string, the response body
          postproc: callable, the post processing function usually supplied by
            the model class. See model.JsonModel.response() as an example.
        """
        # When no response is supplied, emulate a plain 200 OK.
        self.resp = (
            httplib2.Response({"status": 200, "reason": "OK"}) if resp is None else resp
        )
        self.content = content
        self.postproc = postproc
        if "reason" in self.resp:
            self.resp.reason = self.resp["reason"]

    def execute(self, http=None):
        """Execute the request.

        Same behavior as HttpRequest.execute(), but the response is
        mocked and not really from an HTTP request/response.
        """
        return self.postproc(self.resp, self.content)
1597
class RequestMockBuilder(object):
    """A simple mock of HttpRequest

    Pass in a dictionary to the constructor that maps request methodIds to
    tuples of (httplib2.Response, content, opt_expected_body) that should be
    returned when that method is called. None may also be passed in for the
    httplib2.Response, in which case a 200 OK response will be generated.
    If an opt_expected_body (str or dict) is provided, it will be compared to
    the body and UnexpectedBodyError will be raised on inequality.

    Example:
      response = '{"data": {"id": "tag:google.c...'
      requestBuilder = RequestMockBuilder(
        {
          'plus.activities.get': (None, response),
        }
      )
      googleapiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)

    Methods that you do not supply a response for will return a
    200 OK with an empty string as the response content or raise an exception
    if check_unexpected is set to True. The methodId is taken from the rpcName
    in the discovery document.

    For more details see the project wiki.
    """

    def __init__(self, responses, check_unexpected=False):
        """Constructor for RequestMockBuilder

        The constructed object should be a callable object
        that can replace the class HttpResponse.

        responses - A dictionary that maps methodIds into tuples
                    of (httplib2.Response, content). The methodId
                    comes from the 'rpcName' field in the discovery
                    document.
        check_unexpected - A boolean setting whether or not UnexpectedMethodError
                           should be raised on unsupplied method.
        """
        self.responses = responses
        self.check_unexpected = check_unexpected

    def __call__(
        self,
        http,
        postproc,
        uri,
        method="GET",
        body=None,
        headers=None,
        methodId=None,
        resumable=None,
    ):
        """Implements the callable interface that discovery.build() expects
        of requestBuilder, which is to build an object compatible with
        HttpRequest.execute(). See that method for the description of the
        parameters and the expected response.
        """
        if methodId not in self.responses:
            # No canned response supplied for this method.
            if self.check_unexpected:
                raise UnexpectedMethodError(methodId=methodId)
            model = JsonModel(False)
            return HttpRequestMock(None, "{}", model.response)

        canned = self.responses[methodId]
        resp, content = canned[:2]
        if len(canned) > 2:
            # Test the body against the supplied expected_body.
            expected_body = canned[2]
            if bool(expected_body) != bool(body):
                # Not expecting a body and provided one
                # or expecting a body and not provided one.
                raise UnexpectedBodyError(expected_body, body)
            if isinstance(expected_body, str):
                expected_body = json.loads(expected_body)
            body = json.loads(body)
            if body != expected_body:
                raise UnexpectedBodyError(expected_body, body)
        return HttpRequestMock(resp, content, postproc)
1679
class HttpMock(object):
    """Mock of httplib2.Http"""

    def __init__(self, filename=None, headers=None):
        """
        Args:
          filename: string, absolute filename to read response from
          headers: dict, header to return with response
        """
        if headers is None:
            headers = {"status": "200"}
        if filename:
            # Use a context manager so the handle is closed even if read()
            # raises (the original leaked the handle on error).
            with open(filename, "rb") as f:
                self.data = f.read()
        else:
            self.data = None
        self.response_headers = headers
        # The following attributes record the last call to request() so tests
        # can inspect it. (The original assigned self.headers twice; the
        # duplicate is removed here.)
        self.headers = None
        self.uri = None
        self.method = None
        self.body = None

    def request(
        self,
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=1,
        connection_type=None,
    ):
        """Record the request and return the canned response headers/data."""
        self.uri = uri
        self.method = method
        self.body = body
        self.headers = headers
        return httplib2.Response(self.response_headers), self.data
1719
class HttpMockSequence(object):
    """Mock of httplib2.Http

    Mocks a sequence of calls to request returning different responses for each
    call. Create an instance initialized with the desired response headers
    and content and then use as if an httplib2.Http instance.

      http = HttpMockSequence([
        ({'status': '401'}, ''),
        ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
        ({'status': '200'}, 'echo_request_headers'),
      ])
      resp, content = http.request("http://examples.com")

    There are special values you can pass in for content to trigger
    behaviours that are helpful in testing.

    'echo_request_headers' means return the request headers in the response body
    'echo_request_headers_as_json' means return the request headers in
       the response body
    'echo_request_body' means return the request body in the response body
    'echo_request_uri' means return the request uri in the response body
    """

    def __init__(self, iterable):
        """
        Args:
          iterable: iterable, a sequence of pairs of (headers, body)
        """
        self._iterable = iterable
        self.follow_redirects = True

    def request(
        self,
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=1,
        connection_type=None,
    ):
        """Pop the next canned (headers, body) pair and return it."""
        resp, content = self._iterable.pop(0)
        content = six.ensure_binary(content)

        # Magic content values echo parts of the request back for testing.
        if content == b"echo_request_headers":
            content = headers
        elif content == b"echo_request_headers_as_json":
            content = json.dumps(headers)
        elif content == b"echo_request_body":
            content = body.read() if hasattr(body, "read") else body
        elif content == b"echo_request_uri":
            content = uri

        if isinstance(content, six.text_type):
            content = content.encode("utf-8")
        return httplib2.Response(resp), content
1779
def set_user_agent(http, user_agent):
    """Set the user-agent on every request.

    Args:
      http - An instance of httplib2.Http
             or something that acts like it.
      user_agent: string, the value for the user-agent header.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = set_user_agent(h, "my-app-name/6.0")

    Most of the time the user-agent will be set doing auth, this is for the rare
    cases where you are accessing an unauthenticated endpoint.
    """
    wrapped_request = http.request

    def request_with_user_agent(
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
        connection_type=None,
    ):
        """Inject (or prepend to) the user-agent header, then delegate."""
        if headers is None:
            headers = {}
        if "user-agent" in headers:
            # Preserve any caller-supplied user-agent by prepending ours.
            headers["user-agent"] = user_agent + " " + headers["user-agent"]
        else:
            headers["user-agent"] = user_agent
        return wrapped_request(
            uri,
            method=method,
            body=body,
            headers=headers,
            redirections=redirections,
            connection_type=connection_type,
        )

    http.request = request_with_user_agent
    return http
def tunnel_patch(http):
    """Tunnel PATCH requests over POST.

    Args:
      http - An instance of httplib2.Http
             or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = tunnel_patch(h)

    Useful if you are running on a platform that doesn't support PATCH.
    Apply this last if you are using OAuth 1.0, as changing the method
    will result in a different signature.
    """
    request_orig = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
        connection_type=None,
    ):
        """Rewrite a PATCH request as a POST with an X-HTTP-Method-Override
        header, then delegate to the original request method."""
        if headers is None:
            headers = {}
        if method == "PATCH":
            if "oauth_token" in headers.get("authorization", ""):
                # An OAuth 1.0 signature covers the HTTP method, so changing
                # PATCH to POST after signing invalidates the signature.
                LOGGER.warning(
                    "OAuth 1.0 request made with Credentials after tunnel_patch."
                )
            headers["x-http-method-override"] = "PATCH"
            method = "POST"
        resp, content = request_orig(
            uri,
            method=method,
            body=body,
            headers=headers,
            redirections=redirections,
            connection_type=connection_type,
        )
        return resp, content

    http.request = new_request
    return http
def build_http():
    """Builds httplib2.Http object

    Returns:
      A httplib2.Http object, which is used to make http requests, and which has
      timeout set by default. To override default timeout call

        socket.setdefaulttimeout(timeout_in_sec)

      before interacting with this method.
    """
    timeout = socket.getdefaulttimeout()
    if timeout is None:
        timeout = DEFAULT_HTTP_TIMEOUT_SEC
    http = httplib2.Http(timeout=timeout)
    # 308's are used by several Google APIs (Drive, YouTube)
    # for Resumable Uploads rather than Permanent Redirects.
    # This asks httplib2 to exclude 308s from the status codes
    # it treats as redirects
    try:
        http.redirect_codes = http.redirect_codes - {308}
    except AttributeError:
        # Apache Beam tests depend on this library and cannot
        # currently upgrade their httplib2 version
        # http.redirect_codes does not exist in previous versions
        # of httplib2, so pass
        pass

    return http
1915