diff --git a/gdrivefs/account_info.py b/gdrivefs/account_info.py
index 834a8db..8073f90 100644
--- a/gdrivefs/account_info.py
+++ b/gdrivefs/account_info.py
@@ -9,10 +9,10 @@ class AccountInfo(LiveReaderBase):
 
     """Encapsulates our account info."""
 
-    __map = {'root_id': u'rootFolderId',
-             'largest_change_id': (u'largestChangeId', int),
-             'quota_bytes_total': (u'quotaBytesTotal', int),
-             'quota_bytes_used': (u'quotaBytesUsed', int)}
+    __map = {'root_id': 'rootFolderId',
+             'largest_change_id': ('largestChangeId', int),
+             'quota_bytes_total': ('quotaBytesTotal', int),
+             'quota_bytes_used': ('quotaBytesUsed', int)}
 
     def get_data(self):
         gd = get_gdrive()
@@ -33,5 +33,5 @@ def __getattr__(self, key):
 
     @property
     def keys(self):
-        return AccountInfo.__map.keys()
+        return list(AccountInfo.__map.keys())
diff --git a/gdrivefs/auto_auth.py b/gdrivefs/auto_auth.py
index 7462f7f..7a35555 100644
--- a/gdrivefs/auto_auth.py
+++ b/gdrivefs/auto_auth.py
@@ -2,11 +2,11 @@
 import threading
 import webbrowser
 import time
-import urlparse
+import urllib.parse
 
-import SocketServer
-import BaseHTTPServer
-import cStringIO
+import socketserver
+import http.server
+import io
 
 import gdrivefs.oauth_authorize
 import gdrivefs.conf
@@ -14,9 +14,10 @@
 _LOGGER = logging.getLogger(__name__)
 
 
-class _HTTPRequest(BaseHTTPServer.BaseHTTPRequestHandler):
+class _HTTPRequest(http.server.BaseHTTPRequestHandler):
     def __init__(self, request_text):
-        self.rfile = cStringIO.StringIO(request_text)
+        # Python 3's parse_request() expects raw_requestline to be bytes.
+        self.rfile = io.BytesIO(request_text.encode('iso-8859-1'))
         self.raw_requestline = self.rfile.readline()
         self.error_code = self.error_message = None
         self.parse_request()
@@ -77,7 +77,7 @@ def __thread(self):
         monitor = self
 
         # Embedding this because it's so trivial.
-        class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
+        class Handler(http.server.BaseHTTPRequestHandler):
 
             def do_GET(self):
                 # We have the first line of the response with the authorization code
@@ -92,8 +92,8 @@ def do_GET(self):
                 # line and another for a subsequent blank line to terminate the block
                 # and conform with the RFC.
                 hr = _HTTPRequest(self.requestline + "\n\n")
 
-                u = urlparse.urlparse(hr.path)
-                arguments = urlparse.parse_qs(u.query)
+                u = urllib.parse.urlparse(hr.path)
+                arguments = urllib.parse.parse_qs(u.query)
 
                 # It's not an authorization response. Bail with the same error
                 # the library would normally send for unhandled requests.
@@ -130,9 +130,9 @@ def log_message(self, format, *args):
                 pass
 
-        class Server(SocketServer.TCPServer):
+        class Server(socketserver.TCPServer):
             def server_activate(self, *args, **kwargs):
-                r = SocketServer.TCPServer.server_activate(self, *args, **kwargs)
+                r = socketserver.TCPServer.server_activate(self, *args, **kwargs)
 
                 # Sniff the port, now that we're running.
                 monitor._port = self.server_address[1]
diff --git a/gdrivefs/cache_agent.py b/gdrivefs/cache_agent.py
index 53f6781..908fd2c 100644
--- a/gdrivefs/cache_agent.py
+++ b/gdrivefs/cache_agent.py
@@ -96,10 +96,10 @@ def __cleanup(self):
         cache_dict = self.registry.list_raw(self.resource_name)
 
         total_keys = [ (key, value_tuple[1]) for key, value_tuple \
-                        in cache_dict.iteritems() ]
+                        in cache_dict.items() ]
 
         cleanup_keys = [ key for key, value_tuple \
-                        in cache_dict.iteritems() \
+                        in cache_dict.items() \
                         if (datetime.datetime.now() - value_tuple[1]).seconds > \
                            self.max_age ]
diff --git a/gdrivefs/chunked_download.py b/gdrivefs/chunked_download.py
index 85377d6..9b333c3 100644
--- a/gdrivefs/chunked_download.py
+++ b/gdrivefs/chunked_download.py
@@ -71,7 +71,7 @@ def next_chunk(self, num_retries=0):
                        self._progress, self._progress + self._chunksize)
             }
 
-        for retry_num in xrange(num_retries + 1):
+        for retry_num in range(num_retries + 1):
             _logger.debug("Attempting to read chunk. ATTEMPT=(%d)/(%d)",
                           retry_num + 1, num_retries + 1)
diff --git a/gdrivefs/conf.py b/gdrivefs/conf.py
index 5310a3d..ec0181e 100644
--- a/gdrivefs/conf.py
+++ b/gdrivefs/conf.py
@@ -26,8 +26,8 @@ class Conf(object):
     file_chunk_size_kb = 1024
     file_download_temp_max_age_s = 86400
     change_check_frequency_s = 3
-    hidden_flags_list_local = [u'trashed', u'restricted']
-    hidden_flags_list_remote = [u'trashed']
+    hidden_flags_list_local = ['trashed', 'restricted']
+    hidden_flags_list_remote = ['trashed']
     cache_cleanup_check_frequency_s = 60
     cache_entries_max_age = 8 * 60 * 60
     cache_status_post_frequency_s = 10
@@ -37,7 +37,7 @@ class Conf(object):
     google_discovery_service_url = DISCOVERY_URI
     default_buffer_read_blocksize = 65536
 
-    directory_mimetype = u'application/vnd.google-apps.folder'
+    directory_mimetype = 'application/vnd.google-apps.folder'
     default_perm_folder = '777'
     default_perm_file_editable = '666'
     default_perm_file_noneditable = '444'
diff --git a/gdrivefs/drive.py b/gdrivefs/drive.py
index 5e321c5..9755452 100644
--- a/gdrivefs/drive.py
+++ b/gdrivefs/drive.py
@@ -4,7 +4,7 @@
 import random
 import json
 import time
-import httplib
+import http.client
 import ssl
 import tempfile
 import pprint
@@ -54,7 +54,7 @@ def wrapper(*args, **kwargs):
         for n in range(0, 5):
             try:
                 return f(*args, **kwargs)
-            except (ssl.SSLError, httplib.BadStatusLine) as e:
+            except (ssl.SSLError, http.client.BadStatusLine) as e:
                 # These happen sporadically. Use backoff.
                 _logger.exception("There was a transient connection "
                                   "error (%s). Trying again [%s]: %s",
@@ -178,8 +178,8 @@ def __init__(self):
         self.__auth = GdriveAuth()
 
     def __assert_response_kind(self, response, expected_kind):
-        actual_kind = response[u'kind']
-        if actual_kind != unicode(expected_kind):
+        actual_kind = response['kind']
+        if actual_kind != str(expected_kind):
             raise ValueError("Received response of type [%s] instead of "
                              "[%s]."
                             % (actual_kind, expected_kind))
@@ -209,31 +209,31 @@ def list_changes(self, start_change_id=None, page_token=None):
 
         self.__assert_response_kind(response, 'drive#changeList')
 
-        items = response[u'items']
+        items = response['items']
 
         if items:
             _logger.debug("We received (%d) changes to apply.", len(items))
 
-        largest_change_id = int(response[u'largestChangeId'])
-        next_page_token = response.get(u'nextPageToken')
+        largest_change_id = int(response['largestChangeId'])
+        next_page_token = response.get('nextPageToken')
 
         changes = []
         last_change_id = None
         for item in items:
-            change_id = int(item[u'id'])
-            entry_id = item[u'fileId']
+            change_id = int(item['id'])
+            entry_id = item['fileId']
 
-            if item[u'deleted']:
+            if item['deleted']:
                 was_deleted = True
                 entry = None
 
                 _logger.debug("CHANGE: [%s] (DELETED)", entry_id)
             else:
                 was_deleted = False
-                entry = item[u'file']
+                entry = item['file']
 
                 _logger.debug("CHANGE: [%s] [%s] (UPDATED)",
-                              entry_id, entry[u'title'])
+                              entry_id, entry['title'])
 
             if was_deleted:
                 normalized_entry = None
@@ -260,7 +260,7 @@ def get_parents_containing_id(self, child_id, max_results=None):
         response = client.parents().list(fileId=child_id).execute()
         self.__assert_response_kind(response, 'drive#parentList')
 
-        return [ entry[u'id'] for entry in response[u'items'] ]
+        return [ entry['id'] for entry in response['items'] ]
 
     @_marshall
     def get_children_under_parent_id(self,
@@ -298,7 +298,7 @@ def get_children_under_parent_id(self,
 
         self.__assert_response_kind(response, 'drive#childList')
 
-        return [ entry[u'id'] for entry in response[u'items'] ]
+        return [ entry['id'] for entry in response['items'] ]
 
     @_marshall
     def get_entries(self, entry_ids):
@@ -373,9 +373,9 @@ def list_files(self, query_contains_string=None, query_is_string=None,
             self.__assert_response_kind(result, 'drive#fileList')
 
             _logger.debug("(%d) entries were presented for page-number "
-                          "(%d).", len(result[u'items']), page_num)
+                          "(%d).", len(result['items']), page_num)
 
-            for entry_raw in result[u'items']:
+            for entry_raw in result['items']:
                 entry = \
                     gdrivefs.normal_entry.NormalEntry(
                         'list_files',
@@ -383,14 +383,14 @@ def list_files(self, query_contains_string=None, query_is_string=None,
 
                 entries.append(entry)
 
-            if u'nextPageToken' not in result:
+            if 'nextPageToken' not in result:
                 _logger.debug("No more pages in file listing.")
                 break
 
             _logger.debug("Next page-token in file-listing is [%s].",
-                          result[u'nextPageToken'])
+                          result['nextPageToken'])
 
-            page_token = result[u'nextPageToken']
+            page_token = result['nextPageToken']
             page_num += 1
 
         return entries
@@ -427,7 +427,7 @@ def download_to_local(self, output_file_path, normalized_entry,
             message = ("Entry with ID [%s] can not be exported to type [%s]. "
                        "The available types are: %s" %
                        (normalized_entry.id, mime_type,
-                        ', '.join(normalized_entry.download_links.keys())))
+                        ', '.join(list(normalized_entry.download_links.keys()))))
 
             _logger.warning(message)
             raise gdrivefs.errors.ExportFormatError(message)
diff --git a/gdrivefs/fsutility.py b/gdrivefs/fsutility.py
index 8e591cb..9eec13a 100644
--- a/gdrivefs/fsutility.py
+++ b/gdrivefs/fsutility.py
@@ -39,7 +39,7 @@ def wrapper(*args, **kwargs):
         if args or kwargs:
             condensed = {}
-            for i in xrange(len(args)):
+            for i in range(len(args)):
                 # Skip the 'self' argument.
                 if i == 0:
                     continue
@@ -49,16 +49,17 @@ def wrapper(*args, **kwargs):
 
                 condensed[argument_names[i - 1]] = args[i]
 
-            for k, v in kwargs.iteritems():
+            for k, v in kwargs.items():
                 condensed[k] = v
 
             values_nice = [("%s= [%s]" % (k, v)) for k, v \
-                           in condensed.iteritems() \
+                           in condensed.items() \
                            if k not in excluded]
 
             if otherdata_cb:
                 data = otherdata_cb(*args, **kwargs)
-                for k, v in data.iteritems():
-                    values_nice[k] = v
+                for k, v in data.items():
+                    # values_nice is a list, not a dict; append formatted pairs.
+                    values_nice.append("%s= [%s]" % (k, v))
 
             if values_nice:
diff --git a/gdrivefs/gdfuse.py b/gdrivefs/gdfuse.py
index ff6df20..59aac43 100644
--- a/gdrivefs/gdfuse.py
+++ b/gdrivefs/gdfuse.py
@@ -781,7 +781,7 @@ def destroy(self, path):
     def listxattr(self, raw_path):
         (entry, path, filename) = get_entry_or_raise(raw_path)
 
-        return entry.xattr_data.keys()
+        return list(entry.xattr_data.keys())
 
     @dec_hint(['path', 'name', 'position'])
     def getxattr(self, raw_path, name, position=0):
diff --git a/gdrivefs/normal_entry.py b/gdrivefs/normal_entry.py
index 57bde61..96f96ba 100644
--- a/gdrivefs/normal_entry.py
+++ b/gdrivefs/normal_entry.py
@@ -47,67 +47,67 @@ def __init__(self, gd_resource_type, raw_data):
         # can get a file-size up-front, or we have to decide on a specific
         # mime-type in order to do so.
 
-        requires_mimetype = u'fileSize' not in self.__raw_data and \
-                            raw_data[u'mimeType'] != self.__directory_mimetype
+        requires_mimetype = 'fileSize' not in self.__raw_data and \
+                            raw_data['mimeType'] != self.__directory_mimetype
 
         self.__info['requires_mimetype'] = \
             requires_mimetype
 
         self.__info['title'] = \
-            raw_data[u'title']
+            raw_data['title']
 
         self.__info['mime_type'] = \
-            raw_data[u'mimeType']
+            raw_data['mimeType']
 
         self.__info['labels'] = \
-            raw_data[u'labels']
+            raw_data['labels']
 
         self.__info['id'] = \
-            raw_data[u'id']
+            raw_data['id']
 
         self.__info['last_modifying_user_name'] = \
-            raw_data.get(u'lastModifyingUserName')
+            raw_data.get('lastModifyingUserName')
 
         self.__info['writers_can_share'] = \
-            raw_data[u'writersCanShare']
+            raw_data['writersCanShare']
 
         self.__info['owner_names'] = \
-            raw_data[u'ownerNames']
+            raw_data['ownerNames']
 
         self.__info['editable'] = \
-            raw_data[u'editable']
+            raw_data['editable']
 
         self.__info['user_permission'] = \
-            raw_data[u'userPermission']
+            raw_data['userPermission']
 
         self.__info['link'] = \
-            raw_data.get(u'embedLink')
+            raw_data.get('embedLink')
 
         self.__info['file_size'] = \
-            int(raw_data.get(u'fileSize', 0))
+            int(raw_data.get('fileSize', 0))
 
         self.__info['file_extension'] = \
-            raw_data.get(u'fileExtension')
+            raw_data.get('fileExtension')
 
         self.__info['md5_checksum'] = \
-            raw_data.get(u'md5Checksum')
+            raw_data.get('md5Checksum')
 
         self.__info['image_media_metadata'] = \
-            raw_data.get(u'imageMediaMetadata')
+            raw_data.get('imageMediaMetadata')
 
         self.__info['download_links'] = \
-            raw_data.get(u'exportLinks', {})
+            raw_data.get('exportLinks', {})
 
         try:
             self.__info['download_links'][self.__info['mime_type']] = \
-                raw_data[u'downloadUrl']
+                raw_data['downloadUrl']
         except KeyError:
             pass
 
         self.__update_display_name()
 
-        for parent in raw_data[u'parents']:
-            self.__parents.append(parent[u'id'])
+        for parent in raw_data['parents']:
+            self.__parents.append(parent['id'])
 
     def __getattr__(self, key):
         return self.__info[key]
@@ -169,7 +169,7 @@ def normalize_download_mimetype(self, specific_mimetype=None):
         # If there's only one download link, resort to using it (perhaps it was
         # an uploaded file, assigned only one type).
         elif len(self.download_links) == 1:
-            mime_type = self.download_links.keys()[0]
+            mime_type = list(self.download_links.keys())[0]
 
         else:
             raise ExportFormatError("A correct mime-type needs to be "
@@ -185,7 +185,7 @@ def __convert(self, data):
             list_ = [("K(%s)=V(%s)" % (self.__convert(key),
                                        self.__convert(value))) \
                      for key, value \
-                     in data.iteritems()]
+                     in data.items()]
 
             final = '; '.join(list_)
             return final
@@ -194,7 +194,7 @@ def __convert(self, data):
                                for element \
                                in data])
             return final
-        elif isinstance(data, unicode):
+        elif isinstance(data, str):
             return utility.translate_filename_charset(data)
         elif isinstance(data, Number):
             return str(data)
@@ -207,7 +207,8 @@ def get_data(self):
         original = {
-            key.encode('utf8'): value
+            # Keys are already text (str) under Python 3; don't coerce to bytes.
+            key: value
             for key, value
-            in self.__raw_data.iteritems()
+            in self.__raw_data.items()
         }
 
         distilled = self.__info
@@ -231,8 +231,8 @@ def xattr_data(self):
         data_dict = self.get_data()
 
         attrs = {}
-        for a_type, a_dict in data_dict.iteritems():
-            for key, value in a_dict.iteritems():
+        for a_type, a_dict in data_dict.items():
+            for key, value in a_dict.items():
                 fqkey = ('user.%s.%s' % (a_type, key))
                 attrs[fqkey] = self.__convert(value)
@@ -249,7 +249,7 @@ def is_directory(self):
     def is_visible(self):
         if [ flag
              for flag, value
-             in self.labels.items()
+             in list(self.labels.items())
              if flag in Conf.get('hidden_flags_list_local') and value ]:
             return False
         else:
@@ -261,13 +261,13 @@ def parents(self):
 
     @property
     def download_types(self):
-        return self.download_links.keys()
+        return list(self.download_links.keys())
 
     @property
     def modified_date(self):
         if 'modified_date' not in self.__cache_dict:
             self.__cache_dict['modified_date'] = \
-                dateutil.parser.parse(self.__raw_data[u'modifiedDate'])
+                dateutil.parser.parse(self.__raw_data['modifiedDate'])
 
         return self.__cache_dict['modified_date']
@@ -281,7 +281,7 @@ def modified_date_epoch(self):
     def mtime_byme_date(self):
         if 'modified_byme_date' not in self.__cache_dict:
             self.__cache_dict['modified_byme_date'] = \
-                dateutil.parser.parse(self.__raw_data[u'modifiedByMeDate'])
+                dateutil.parser.parse(self.__raw_data['modifiedByMeDate'])
 
         return self.__cache_dict['modified_byme_date']
@@ -293,8 +293,8 @@ def mtime_byme_date_epoch(self):
     def atime_byme_date(self):
         if 'viewed_byme_date' not in self.__cache_dict:
             self.__cache_dict['viewed_byme_date'] = \
-                dateutil.parser.parse(self.__raw_data[u'lastViewedByMeDate']) \
-                if u'lastViewedByMeDate' in self.__raw_data \
+                dateutil.parser.parse(self.__raw_data['lastViewedByMeDate']) \
+                if 'lastViewedByMeDate' in self.__raw_data \
                 else None
 
         return self.__cache_dict['viewed_byme_date']
diff --git a/gdrivefs/resources/requirements.txt b/gdrivefs/resources/requirements.txt
index f32db6b..a81a654 100644
--- a/gdrivefs/resources/requirements.txt
+++ b/gdrivefs/resources/requirements.txt
@@ -1,9 +1,9 @@
 google-api-python-client>=1.2
 six>=1.7.3
-fusepy==2.0.2
-#gevent==1.0
-#gipc==0.4.0
-greenlet==0.4.2
+fusepy>=2.0.2
+gevent>=1.0
+gipc>=0.4.0
+greenlet>=0.4.2
 httplib2>=0.9.2
-python-dateutil==2.2
+python-dateutil>=2.2
diff --git a/gdrivefs/utility.py b/gdrivefs/utility.py
index 02a6e0f..331d90c 100644
--- a/gdrivefs/utility.py
+++ b/gdrivefs/utility.py
@@ -79,7 +79,7 @@ def get_first_mime_type_by_extension(self, extension):
         found = [ mime_type
                   for mime_type, temp_extension
-                  in self.default_extensions.iteritems()
+                  in self.default_extensions.items()
                   if temp_extension == extension ]
diff --git a/gdrivefs/volume.py b/gdrivefs/volume.py
index b1adc1d..81988b3 100644
--- a/gdrivefs/volume.py
+++ b/gdrivefs/volume.py
@@ -98,7 +98,7 @@ def remove_entry_recursive(self, entry_id, is_update=False):
             to_remove.extend(current_orphan_ids)
             to_remove.extend(children_ids_to_remove)
 
-        return (removed.keys(), (stat_folders + stat_files))
+        return (list(removed.keys()), (stat_folders + stat_files))
 
     def __remove_entry(self, entry_id, is_update=False):
         """Remove an entry. Updates references from linked entries, but does
@@ -653,7 +653,7 @@ def __do_update_for_missing_entry(self, requested_entry_id):
 
         path_relations = PathRelations.get_instance()
 
-        for entry_id, entry in retrieved.iteritems():
+        for entry_id, entry in retrieved.items():
             path_relations.register_entry(entry)
 
         return retrieved