#
# .. code-block:: sh
#
- #    $ pip install Flask==2.0.1 torchvision==0.10.0
+ #    pip install Flask==2.0.1 torchvision==0.10.0

######################################################################
def hello():
    return 'Hello World!'

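
###############################################################################
# For reference, the ``hello`` handler above lives in a minimal ``app.py`` that
# follows the standard Flask hello-world pattern; the import, the ``Flask`` app
# object, and the route decorator are not shown in this hunk and are assumed
# here as a sketch:
#
# .. code-block:: python
#
#    from flask import Flask
#
#    app = Flask(__name__)
#
#
#    @app.route('/')
#    def hello():
#        return 'Hello World!'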
- ###############################################################################
- # Save the above snippet in a file called ``app.py`` and you can now run a
- # Flask development server by typing:
- #
- # .. code-block:: sh
- #
- #    $ FLASK_ENV=development FLASK_APP=app.py flask run
-
- ###############################################################################
- # When you visit ``http://localhost:5000/`` in your web browser, you will be
- # greeted with ``Hello World!`` text
-
- ###############################################################################
- # We will make slight changes to the above snippet, so that it suits our API
- # definition. First, we will rename the method to ``predict``. We will update
- # the endpoint path to ``/predict``. Since the image files will be sent via
- # HTTP POST requests, we will update it so that it also accepts only POST
- # requests:
-
-
- @app.route('/predict', methods=['POST'])
- def predict():
-     return 'Hello World!'
-
###############################################################################
# We will also change the response type, so that it returns a JSON response
# containing the ImageNet class id and name. The updated ``app.py`` file will
@@ -137,7 +113,6 @@ def transform_image(image_bytes):
    image = Image.open(io.BytesIO(image_bytes))
    return my_transforms(image).unsqueeze(0)

-
######################################################################
# The above method takes image data in bytes, applies the series of transforms
# and returns a tensor. To test the above method, read an image file in
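#
# As a quick sanity check (a sketch: it assumes an RGB test image such as a
# local ``cat.jpg``; any similar image works), feed the raw bytes through
# ``transform_image`` and confirm that a ``[1, 3, 224, 224]`` tensor comes back:
#
# .. code-block:: python
#
#    with open('cat.jpg', 'rb') as f:
#        image_bytes = f.read()
#
#    tensor = transform_image(image_bytes=image_bytes)
#    print(tensor.shape)  # torch.Size([1, 3, 224, 224]) for an RGB input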
@@ -173,7 +148,6 @@ def get_prediction(image_bytes):
    _, y_hat = outputs.max(1)
    return y_hat

-
######################################################################
# The tensor ``y_hat`` will contain the index of the predicted class id.
# However, we need a human readable class name. For that we need a class id
@@ -217,18 +191,6 @@ def get_prediction(image_bytes):
# The first item in the array is the ImageNet class id and the second item is
# the human readable name.
#
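# Concretely, ``imagenet_class_index.json`` maps each class index (stored as a
# string key) to such a pair, so the lookup is a plain dictionary access. A
# sketch, assuming the standard ImageNet class-index file:
#
# .. code-block:: python
#
#    # imagenet_class_index looks like {"0": ["n01440764", "tench"], ...}
#    predicted_idx = str(y_hat.item())
#    class_id, class_name = imagenet_class_index[predicted_idx]
#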
- # .. note::
- #    Did you notice that ``model`` variable is not part of ``get_prediction``
- #    method? Or why is model a global variable? Loading a model can be an
- #    expensive operation in terms of memory and compute. If we loaded the model in the
- #    ``get_prediction`` method, then it would get unnecessarily loaded every
- #    time the method is called. Since, we are building a web server, there
- #    could be thousands of requests per second, we should not waste time
- #    redundantly loading the model for every inference. So, we keep the model
- #    loaded in memory just once. In
- #    production systems, it's necessary to be efficient about your use of
- #    compute to be able to serve requests at scale, so you should generally
- #    load your model before serving requests.

######################################################################
# Integrating the model in our API Server
@@ -251,66 +213,68 @@ def get_prediction(image_bytes):
#            img_bytes = file.read()
#            class_id, class_name = get_prediction(image_bytes=img_bytes)
#            return jsonify({'class_id': class_id, 'class_name': class_name})
-
+ #
+ #
######################################################################
# The ``app.py`` file is now complete. Following is the full version; replace
# the paths with the paths where you saved your files and it should run:
#
# .. code-block:: python
#
- #    import io
- #    import json
+ #    import io
+ #    import json
#
- #    from torchvision import models
- #    import torchvision.transforms as transforms
- #    from PIL import Image
- #    from flask import Flask, jsonify, request
+ #    from torchvision import models
+ #    import torchvision.transforms as transforms
+ #    from PIL import Image
+ #    from flask import Flask, jsonify, request
#
#
- #    app = Flask(__name__)
- #    imagenet_class_index = json.load(open('<PATH/TO/.json/FILE>/imagenet_class_index.json'))
- #    model = models.densenet121(weights='IMAGENET1K_V1')
- #    model.eval()
+ #    app = Flask(__name__)
+ #    imagenet_class_index = json.load(open('<PATH/TO/.json/FILE>/imagenet_class_index.json'))
+ #    model = models.densenet121(weights='IMAGENET1K_V1')
+ #    model.eval()
#
#
- #    def transform_image(image_bytes):
- #        my_transforms = transforms.Compose([transforms.Resize(255),
- #                                            transforms.CenterCrop(224),
- #                                            transforms.ToTensor(),
- #                                            transforms.Normalize(
- #                                                [0.485, 0.456, 0.406],
- #                                                [0.229, 0.224, 0.225])])
- #        image = Image.open(io.BytesIO(image_bytes))
- #        return my_transforms(image).unsqueeze(0)
+ #    def transform_image(image_bytes):
+ #        my_transforms = transforms.Compose([transforms.Resize(255),
+ #                                            transforms.CenterCrop(224),
+ #                                            transforms.ToTensor(),
+ #                                            transforms.Normalize(
+ #                                                [0.485, 0.456, 0.406],
+ #                                                [0.229, 0.224, 0.225])])
+ #        image = Image.open(io.BytesIO(image_bytes))
+ #        return my_transforms(image).unsqueeze(0)
#
#
- #    def get_prediction(image_bytes):
- #        tensor = transform_image(image_bytes=image_bytes)
- #        outputs = model.forward(tensor)
- #        _, y_hat = outputs.max(1)
- #        predicted_idx = str(y_hat.item())
- #        return imagenet_class_index[predicted_idx]
+ #    def get_prediction(image_bytes):
+ #        tensor = transform_image(image_bytes=image_bytes)
+ #        outputs = model.forward(tensor)
+ #        _, y_hat = outputs.max(1)
+ #        predicted_idx = str(y_hat.item())
+ #        return imagenet_class_index[predicted_idx]
#
#
- #    @app.route('/predict', methods=['POST'])
- #    def predict():
- #        if request.method == 'POST':
- #            file = request.files['file']
- #            img_bytes = file.read()
- #            class_id, class_name = get_prediction(image_bytes=img_bytes)
- #            return jsonify({'class_id': class_id, 'class_name': class_name})
+ #    @app.route('/predict', methods=['POST'])
+ #    def predict():
+ #        if request.method == 'POST':
+ #            file = request.files['file']
+ #            img_bytes = file.read()
+ #            class_id, class_name = get_prediction(image_bytes=img_bytes)
+ #            return jsonify({'class_id': class_id, 'class_name': class_name})
+ #
+ #
+ #    if __name__ == '__main__':
+ #        app.run()
#
#
- #    if __name__ == '__main__':
- #        app.run()
-
######################################################################
# Let's test our web server! Run:
#
# .. code-block:: sh
#
- #    $ FLASK_ENV=development FLASK_APP=app.py flask run
-
+ #    FLASK_ENV=development FLASK_APP=app.py flask run
+ #
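#
# If you want to poke at the endpoint from the command line first, a quick
# check (a sketch: it assumes the server is running on the default port 5000
# and that a ``cat.jpg`` sits in the current directory) is:
#
# .. code-block:: sh
#
#    curl -F "file=@cat.jpg" http://localhost:5000/predict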
#######################################################################
# We can use the
# `requests <https://pypi.org/project/requests/>`_
@@ -322,6 +286,7 @@ def get_prediction(image_bytes):
#
#    resp = requests.post("http://localhost:5000/predict",
#                         files={"file": open('<PATH/TO/.jpg/FILE>/cat.jpg','rb')})
+ #

#######################################################################
# Printing `resp.json()` will now show the following:
@@ -330,7 +295,6 @@ def get_prediction(image_bytes):
#
#    {"class_id": "n02124075", "class_name": "Egyptian_cat"}
#
-
######################################################################
# Next steps
# --------------
@@ -368,3 +332,4 @@ def get_prediction(image_bytes):
#
# - Finally, we encourage you to check out our other tutorials on deploying PyTorch models
#   linked-to at the top of the page.
+ #