
Commit 668ecd3

fix some pep8 and typos
1 parent b50d7a6 commit 668ecd3

File tree

17 files changed: +48 −44 lines


docs/client/overview.rst

Lines changed: 2 additions & 2 deletions
@@ -144,7 +144,7 @@ For example, to run a new job for a given spider with custom parameters::
 
 
 
-Geting job information
+Getting job information
 ^^^^^^^^^^^^^^^^^^^^^^
 
 To select a specific job for a project, use ``.jobs.get(<jobKey>)``::

@@ -387,7 +387,7 @@ acts like a Python dictionary::
 '5123a86-master'
 
 To check what keys are available (they ultimately depend on the job),
-you can use its ``.iter()`` method (here, it's wrapped inside a dict for readibility)::
+you can use its ``.iter()`` method (here, it's wrapped inside a dict for readability)::
 
 >>> dict(job.metadata.iter())
 {...
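
The corrected passage documents two client calls; as a quick illustration, here is a minimal sketch of how they fit together (the API key, project ID, and job key are placeholders, not values from this commit):

    from scrapinghub import ScrapinghubClient

    client = ScrapinghubClient('APIKEY')      # placeholder API key
    project = client.get_project(123456)      # hypothetical project ID

    # Select a specific job by its key, as the docs above describe.
    job = project.jobs.get('123456/1/8')      # hypothetical job key

    # Job metadata acts like a dictionary; wrapping .iter() in dict()
    # is the readability trick the fixed sentence refers to.
    print(dict(job.metadata.iter()))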

docs/conf.py

Lines changed: 1 addition & 0 deletions
@@ -172,6 +172,7 @@
 html_theme = 'sphinx_rtd_theme'
 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
 
+
 # disable cross-reference for ivar
 # patch taken from http://stackoverflow.com/a/41184353/1932023
 def patched_make_field(self, types, domain, items, env=None):

scrapinghub/client/jobs.py

Lines changed: 4 additions & 4 deletions
@@ -54,9 +54,9 @@ def count(self, spider=None, state=None, has_tag=None, lacks_tag=None,
         :param lacks_tag: (optional) filter results by missing tag(s), a string
             or a list of strings.
         :param startts: (optional) UNIX timestamp at which to begin results,
-            in millisecons.
+            in milliseconds.
         :param endts: (optional) UNIX timestamp at which to end results,
-            in millisecons.
+            in milliseconds.
         :param \*\*params: (optional) other filter params.
 
         :return: jobs count.

@@ -222,9 +222,9 @@ def list(self, count=None, start=None, spider=None, state=None,
         :param lacks_tag: (optional) filter results by missing tag(s), a string
             or a list of strings.
         :param startts: (optional) UNIX timestamp at which to begin results,
-            in millisecons.
+            in milliseconds.
         :param endts: (optional) UNIX timestamp at which to end results,
-            in millisecons.
+            in milliseconds.
         :param meta: (optional) request for additional fields, a single
             field name or a list of field names to return.
         :param \*\*params: (optional) other filter params.
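
Both docstrings stress that ``startts`` and ``endts`` are UNIX timestamps in milliseconds, which is easy to get wrong since Python's clock is in seconds. A minimal sketch (API key, project ID, and the 24-hour window are assumptions for illustration):

    import time
    from scrapinghub import ScrapinghubClient

    client = ScrapinghubClient('APIKEY')      # placeholder API key
    project = client.get_project(123456)      # hypothetical project ID

    # time.time() returns seconds; startts/endts expect milliseconds.
    now_ms = int(time.time() * 1000)
    day_ms = 24 * 60 * 60 * 1000

    # Count finished jobs from the last 24 hours.
    print(project.jobs.count(state='finished',
                             startts=now_ms - day_ms,
                             endts=now_ms))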

scrapinghub/client/utils.py

Lines changed: 4 additions & 4 deletions
@@ -89,7 +89,7 @@ def update_kwargs(kwargs, **params):
 
 
 def parse_auth(auth):
-    """Parse authentification token.
+    """Parse authentication token.
 
     >>> os.environ['SH_APIKEY'] = 'apikey'
     >>> parse_auth(None)

@@ -106,7 +106,7 @@ def parse_auth(auth):
     if auth is None:
         apikey = os.environ.get('SH_APIKEY')
         if apikey:
-            return (apikey, '')
+            return apikey, ''
 
     jobauth = os.environ.get('SHUB_JOBAUTH')
     if jobauth:

@@ -131,7 +131,7 @@ def parse_auth(auth):
         return jwt_auth
 
     login, _, password = auth.partition(':')
-    return (login, password)
+    return login, password
 
 
 def _search_for_jwt_credentials(auth):

@@ -144,6 +144,6 @@ def _search_for_jwt_credentials(auth):
         decoded_auth = decoded_auth.decode('ascii')
         login, _, password = decoded_auth.partition(':')
         if password and parse_job_key(login):
-            return (login, password)
+            return login, password
     except (UnicodeDecodeError, ValueError):
         pass
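
The doctest in ``parse_auth`` already hints at the environment-variable fallback; a small standalone sketch of the same behaviour (the key value is the placeholder from the doctest, not a real credential):

    import os
    from scrapinghub.client.utils import parse_auth

    # With no explicit credentials, parse_auth falls back to SH_APIKEY
    # and returns an (apikey, '') pair, per its doctest above.
    os.environ['SH_APIKEY'] = 'apikey'   # placeholder value
    print(parse_auth(None))              # -> ('apikey', '')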

scrapinghub/hubstorage/batchuploader.py

Lines changed: 1 addition & 0 deletions
@@ -196,6 +196,7 @@ def _upload(self, batch):
             headers=headers,
         )
 
+
 class ValueTooLarge(ValueError):
     """Raised when a serialized item is greater than 1MB"""
 
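
Since ``ValueTooLarge`` subclasses ``ValueError`` and signals an item whose serialized form exceeds 1MB, callers can guard writes with it. A hedged sketch only: the ``writer`` and ``item`` are hypothetical, and whether a particular write raises depends on the item's serialized size:

    from scrapinghub.hubstorage.batchuploader import ValueTooLarge

    def safe_write(writer, item):
        """Write one item, skipping anything whose serialized form is too big."""
        try:
            writer.write(item)   # hypothetical writer from create_writer()
        except ValueTooLarge:
            # Serialized item exceeded the 1MB limit documented on the class.
            print('skipped oversized item')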

scrapinghub/hubstorage/collectionsrt.py

Lines changed: 15 additions & 14 deletions
@@ -66,21 +66,22 @@ def truncate(self, _name):
         return self.apipost('delete', params={'name': _name}, is_idempotent=True)
 
     def iter_json(self, _type, _name, requests_params=None, **apiparams):
-        return DownloadableResource.iter_json(self, (_type, _name),
-                requests_params=requests_params, **apiparams)
+        return DownloadableResource.iter_json(
+            self, (_type, _name), requests_params=requests_params, **apiparams
+        )
 
     def iter_msgpack(self, _type, _name, requests_params=None, **apiparams):
-        return DownloadableResource.iter_msgpack(self, (_type, _name),
-                requests_params=requests_params, **apiparams)
+        return DownloadableResource.iter_msgpack(
+            self, (_type, _name), requests_params=requests_params, **apiparams
+        )
 
     def create_writer(self, coltype, colname, **writer_kwargs):
         self._validate_collection(coltype, colname)
         kwargs = dict(writer_kwargs)
         kwargs.setdefault('content_encoding', 'gzip')
         kwargs.setdefault('auth', self.auth)
         url = urlpathjoin(self.url, coltype, colname)
-        return self.client.batchuploader.create_writer(url,
-                                                       **kwargs)
+        return self.client.batchuploader.create_writer(url, **kwargs)
 
     def new_collection(self, coltype, colname):
         self._validate_collection(coltype, colname)

@@ -109,15 +110,14 @@ def _validate_collection(self, coltype, colname):
             raise ValueError('Invalid collection name {!r}, only alphanumeric '
                              'characters'.format(colname))
 
-
     def _batch(self, method, path, total_param, progress=None, **params):
         total = 0
         getparams = dict(params)
         try:
             while True:
                 r = next(self.apirequest(
                     path, method=method, params=getparams,
-                    is_idempotent=method=='GET',
+                    is_idempotent=method == 'GET',
                 ))
                 total += r[total_param]
                 next_start = r.get('nextstart')

@@ -147,8 +147,7 @@ def create_writer(self, **kwargs):
         kwargs are passed to batchuploader.create_writer, but auth and gzip
         content encoding are specified if not provided
         """
-        return self._collections.create_writer(self.coltype, self.colname,
-                                               **kwargs)
+        return self._collections.create_writer(self.coltype, self.colname, **kwargs)
 
     def get(self, *args, **kwargs):
         return self._collections.get(self.coltype, self.colname, *args, **kwargs)

@@ -166,9 +165,11 @@ def count(self, *args, **kwargs):
         return self._collections.count(self.coltype, self.colname, *args, **kwargs)
 
     def iter_json(self, requests_params=None, **apiparams):
-        return self._collections.iter_json(self.coltype, self.colname,
-                requests_params=requests_params, **apiparams)
+        return self._collections.iter_json(
+            self.coltype, self.colname, requests_params=requests_params, **apiparams
+        )
 
     def iter_values(self, requests_params=None, **apiparams):
-        return self._collections.iter_values(self.coltype, self.colname,
-                requests_params=requests_params, **apiparams)
+        return self._collections.iter_values(
+            self.coltype, self.colname, requests_params=requests_params, **apiparams
+        )
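
The ``is_idempotent=method == 'GET'`` change is worth a note: PEP 8 drops spaces around ``=`` in keyword arguments but requires them around comparison operators, even when the comparison supplies an argument's value. A tiny self-contained illustration (the function is a stand-in, not the library's API):

    def request(path, is_idempotent=False):
        # Stand-in used only to illustrate the spacing rule.
        return path, is_idempotent

    method = 'GET'
    # Flagged by the pep8 checker (E225, missing whitespace around operator):
    #   request('/jobs', is_idempotent=method=='GET')
    # Preferred: spaces around the comparison, none around the kwarg '='.
    print(request('/jobs', is_idempotent=method == 'GET'))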

scrapinghub/hubstorage/job.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 import logging
 from .resourcetype import (ItemsResourceType, DownloadableResource,
-    MappingResourceType)
+                           MappingResourceType)
 from .utils import millitime, urlpathjoin
 from .jobq import JobQ
 

scrapinghub/hubstorage/project.py

Lines changed: 2 additions & 1 deletion
@@ -15,7 +15,7 @@ def __init__(self, client, projectid, auth=None):
         self.client = client
         self.projectid = urlpathjoin(projectid)
         assert len(self.projectid.split('/')) == 1, \
-            'projectkey must be just one id: %s' % projectid
+               'projectkey must be just one id: %s' % projectid
         self.auth = xauth(auth) or client.auth
         self.jobs = Jobs(client, self.projectid, auth=auth)
         self.items = Items(client, self.projectid, auth=auth)

@@ -68,6 +68,7 @@ class Jobs(ResourceType):
     def list(self, _key=None, **params):
         return self.apiget(_key, params=params)
 
+
 class Items(ResourceType):
 
     resource_type = 'items'

scrapinghub/hubstorage/utils.py

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@ def urlpathjoin(*parts):
 
 
 def xauth(auth):
-    """Expand authentification token
+    """Expand authentication token
 
     >>> xauth(None)
     >>> xauth(('user', 'pass'))

scrapinghub/legacy.py

Lines changed: 4 additions & 4 deletions
@@ -61,7 +61,7 @@ def __init__(self, apikey=None, password='', _old_passwd='',
             raise RuntimeError("No API key provided and SH_APIKEY environment variable not set")
 
         assert not apikey.startswith('http://'), \
-            "Instantiating scrapinghub.Connection with url as first argument is not supported"
+               "Instantiating scrapinghub.Connection with url as first argument is not supported"
         if password:
             warnings.warn("A lot of endpoints support authentication only via apikey.")
         self.apikey = apikey

@@ -77,7 +77,7 @@ def __repr__(self):
     def auth(self):
         warnings.warn("'auth' connection attribute is deprecated, "
                       "use 'apikey' attribute instead", stacklevel=2)
-        return (self.apikey, self.password)
+        return self.apikey, self.password
 
     def _create_session(self):
         from requests import session

@@ -169,8 +169,8 @@ def _decode_response(self, response, format, raw):
             raise APIError("JSON response does not contain status")
         else:  # jl
             return (json.loads(line.decode('utf-8')
-                        if isinstance(line, _BINARY_TYPE) else line)
-                    for line in response.iter_lines())
+                               if isinstance(line, _BINARY_TYPE) else line)
+                    for line in response.iter_lines())
 
 ##
 ## public methods
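
The ``return self.apikey, self.password`` change is behaviour-neutral: a return statement builds the same tuple with or without parentheses. A minimal sketch of the legacy API being touched here (the key is a placeholder):

    from scrapinghub import Connection

    # Connection takes the API key itself as its first argument;
    # passing a URL there trips the assert fixed above.
    conn = Connection('0123456789abcdef0123456789abcdef')  # hypothetical key

    # The deprecated .auth property still returns (apikey, password),
    # emitting a deprecation message via warnings.warn as it does so.
    print(conn.auth)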
