From e68227340db31e6d03ab421c9e1fb5eb8ffbf598 Mon Sep 17 00:00:00 2001
From: Pat Mellon
Date: Mon, 24 Aug 2020 12:51:03 -0400
Subject: [PATCH 01/10] wip

---
 _config.yml                   |  3 ++
 _includes/blog_jumbotron.html | 16 +++++++++
 _layouts/blog.html            | 68 +----------------------------------
 blog.html                     | 10 ------
 blog/index.html               | 33 +++++++++++++++++
 blog/landing-page.html        | 33 +++++++++++++++++
 6 files changed, 86 insertions(+), 77 deletions(-)
 create mode 100644 _includes/blog_jumbotron.html
 delete mode 100644 blog.html
 create mode 100644 blog/index.html
 create mode 100644 blog/landing-page.html

diff --git a/_config.yml b/_config.yml
index a19288554894..c53fd5e8d3d2 100644
--- a/_config.yml
+++ b/_config.yml
@@ -64,6 +64,9 @@ collections:
     output: true
   news:
     output: true
+  blog:
+    output: true
+    permalink: /blog/:path/
 pagination:
   enabled: true
diff --git a/_includes/blog_jumbotron.html b/_includes/blog_jumbotron.html
new file mode 100644
index 000000000000..7bf899214e14
--- /dev/null
+++ b/_includes/blog_jumbotron.html
@@ -0,0 +1,16 @@
+
+
+ {% for post in include.posts limit:1 %} +

Featured Post

+

+ {{ post.title }} +

+

' | remove: '

' | truncate: 100 }}

+ + + Read More + + + {% endfor %} +
+
diff --git a/_layouts/blog.html b/_layouts/blog.html index 85b8ad1c40b1..27793f6621c0 100644 --- a/_layouts/blog.html +++ b/_layouts/blog.html @@ -7,73 +7,7 @@
- {% assign posts = paginator.posts %}
- {% assign display_post_categories = site.posts | map: 'categories' | join: ',' | replace: '-', ' ' | split: ',' | uniq | sort %}
- {% assign current_page = page.url | downcase | remove: ".html" | split: '/' %}
- {% assign post_categories = site.posts | map: 'categories' | join: ',' | split: ',' | uniq | sort %}
-
-
-
- {% for post in posts limit:1 %} -

Featured Post

-

- {{ post.title }} -

-

' | remove: '

' | truncate: 100 }}

- - - Read More - - - {% endfor %} -
-
- -
-
-
- - - - -
- {% for post in posts %} -
-

{{ post.date | date: '%B %d, %Y' }}

-

- {{ post.title }} -

-

{{ post.excerpt | remove: '

' | remove: '

' | truncate: 500}}

- -
- {% endfor %} -
- - {% include pagination_buttons.html %} -
-
-
+ {{ content }} {% include quick_start_module.html %} diff --git a/blog.html b/blog.html deleted file mode 100644 index e39a2a2a555c..000000000000 --- a/blog.html +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: blog -title: Blog -permalink: /blog/ -body-class: blog -redirect_from: "/blog/categories/" -pagination: - enabled: true - permalink: /:num/ ---- diff --git a/blog/index.html b/blog/index.html new file mode 100644 index 000000000000..481d3ff8d57d --- /dev/null +++ b/blog/index.html @@ -0,0 +1,33 @@ +--- +layout: blog +title: Blog +permalink: /blog/all-posts +body-class: blog +--- + +{% assign posts = site.posts %} + +{% include blog_jumbotron.html posts=posts %} + +
+
+
+
+ {% for post in posts %} +
+

{{ post.date | date: '%B %d, %Y' }}

+

+ {{ post.title }} +

+

{{ post.excerpt | remove: '

' | remove: '

' | truncate: 500}}

+ +
+ {% endfor %} +
+
+
+
+ + + + diff --git a/blog/landing-page.html b/blog/landing-page.html new file mode 100644 index 000000000000..f82345400993 --- /dev/null +++ b/blog/landing-page.html @@ -0,0 +1,33 @@ +--- +layout: blog +title: Blog +permalink: /blog/ +body-class: blog +redirect_from: "/blog/categories/" +pagination: + enabled: true + permalink: /:num/ +--- + +{% assign posts = paginator.posts %} + +{% include blog_jumbotron.html posts=posts %} + +
+
+
+
+ {% for post in posts %} +
+

{{ post.date | date: '%B %d, %Y' }}

+

+ {{ post.title }} +

+

{{ post.excerpt | remove: '

' | remove: '

' | truncate: 500}}

+ +
+ {% endfor %} +
+
+
+
From 3bed7853f95a6a3a44f380a3021de0051e2aa11c Mon Sep 17 00:00:00 2001 From: Pat Mellon Date: Wed, 26 Aug 2020 09:39:50 -0400 Subject: [PATCH 02/10] Update button design --- _sass/blog.scss | 13 +++++++++++-- blog/index.html | 1 - blog/landing-page.html | 4 ++++ 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/_sass/blog.scss b/_sass/blog.scss index 7e898e502bf0..7a3a1ffa61e1 100644 --- a/_sass/blog.scss +++ b/_sass/blog.scss @@ -139,7 +139,7 @@ overflow: unset; white-space: unset; text-overflow: unset; - } + } } h1 { @@ -221,7 +221,7 @@ } } - .page-link { + .page-link, .all-blogs { font-size: rem(20px); letter-spacing: 0; line-height: rem(34px); @@ -230,6 +230,15 @@ text-align: center; } + .all-blogs { + width: inherit; + padding: 0.5rem 3.75rem; + color: $dark_grey; + &:hover { + color: $orange; + } + } + @media (max-width: 1067px) { .jumbotron { h1 { diff --git a/blog/index.html b/blog/index.html index 481d3ff8d57d..1a3505711aac 100644 --- a/blog/index.html +++ b/blog/index.html @@ -28,6 +28,5 @@

- diff --git a/blog/landing-page.html b/blog/landing-page.html index f82345400993..572fdc94e746 100644 --- a/blog/landing-page.html +++ b/blog/landing-page.html @@ -28,6 +28,10 @@

{% endfor %} + + From 8377d21b24fe972d016608d5e7b4fdef3629e7ab Mon Sep 17 00:00:00 2001 From: Pat Mellon Date: Wed, 26 Aug 2020 14:10:27 -0400 Subject: [PATCH 03/10] update --- ...2017-5-11-a-tour-of-pytorch-internals-1.md | 1 + ...2017-6-27-a-tour-of-pytorch-internals-2.md | 1 + _posts/2018-01-19-a-year-in.md | 1 + _posts/2018-03-5-tensor-comprehensions.md | 1 + ...018-04-22-pytorch-0_4_0-migration-guide.md | 1 + _posts/2018-05-2-the-road-to-1_0.md | 1 + _posts/2019-05-08-model-serving-in-pyorch.md | 7 ++- .../2019-05-1-pytorch-adds-new-dev-tools.md | 1 + ...-reproducible-research-with-pytorch-hub.md | 1 + _posts/2019-07-18-pytorch-ecosystem.md | 1 + _posts/2019-07-23-mapillary-research.md | 1 + ...8-08-pytorch-1.2-and-domain-api-release.md | 1 + ...-privacy-quantization-and-named-tensors.md | 1 + ...wship-funding-for-privacy-preserving-ml.md | 1 + ...mes-preferred-networks-to-its-community.md | 1 + ...-stochastic-weight-averaging-in-pytorch.md | 1 + ...-1-optimizing-cuda-rnn-with-torchscript.md | 37 ++++++------ _posts/2019-5-22-torchvision03.md | 1 + ...-with-pytorch-automatic-mixed-precision.md | 59 ++++++++++--------- ...ainer-of-the-windows-version-of-pytorch.md | 4 +- ...-pytorch-feature-classification-changes.md | 1 + ...for-large-datasets-many-files-many-gpus.md | 7 ++- ...ow-includes-stochastic-weight-averaging.md | 39 ++++++------ ...4-released-and-domain-libraries-updated.md | 3 +- ...introduction-to-quantization-on-pytorch.md | 1 + ...ot-5-released-with-new-and-updated-apis.md | 1 + ...brary-updates-new-model-serving-library.md | 15 ++--- ...dates-improvements-to-pytorch-tutorials.md | 15 ++--- _posts/2020-7-28-pytorch-1.6-released.md | 47 +++++++-------- _sass/blog.scss | 18 +++++- blog/index.html | 32 ---------- blog/landing-page.html | 16 ++--- 32 files changed, 164 insertions(+), 154 deletions(-) delete mode 100644 blog/index.html diff --git a/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md b/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md index a29ced22ed3e..2d43f20b5bdf 100644 --- a/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md +++ b/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md @@ -4,6 +4,7 @@ title: "A Tour of PyTorch Internals (Part I)" author: "Trevor Killeen" date: 2017-05-11 12:00:00 -0500 redirect_from: /2017/05/11/Internals.html +image: /assets/images/bert2.png --- The fundamental unit in PyTorch is the Tensor. This post will serve as an overview for how we implement Tensors in PyTorch, such that the user can interact with it from the Python shell. In particular, we want to answer four main questions: diff --git a/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md b/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md index a396981dc278..40ea89694fbe 100644 --- a/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md +++ b/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md @@ -4,6 +4,7 @@ title: "PyTorch Internals Part II - The Build System" author: "Trevor Killeen" date: 2017-06-27 12:00:00 -0500 redirect_from: /2017/06/27/Internals2.html +image: /assets/images/bert2.png --- In the first [post]({{ site.baseurl }}{% link _posts/2017-5-11-a-tour-of-pytorch-internals-1.md %}) I explained how we generate a `torch.Tensor` object that you can use in your Python interpreter. Next, I will explore the build system for PyTorch. 
The PyTorch codebase has a variety of components: diff --git a/_posts/2018-01-19-a-year-in.md b/_posts/2018-01-19-a-year-in.md index 86647d110bce..e2fa460b2a20 100644 --- a/_posts/2018-01-19-a-year-in.md +++ b/_posts/2018-01-19-a-year-in.md @@ -4,6 +4,7 @@ title: "PyTorch, a year in...." author: "The PyTorch Team" date: 2018-01-19 12:00:00 -0500 redirect_from: /2018/01/19/a-year-in.html +image: /assets/images/bert2.png --- Today marks 1 year since PyTorch was released publicly. It's been a wild ride — our quest to build a flexible deep learning research platform. Over the last year, we've seen an amazing community of people using, contributing to and evangelizing PyTorch — thank you for the love. diff --git a/_posts/2018-03-5-tensor-comprehensions.md b/_posts/2018-03-5-tensor-comprehensions.md index a777c076a432..a3fb12e28a27 100644 --- a/_posts/2018-03-5-tensor-comprehensions.md +++ b/_posts/2018-03-5-tensor-comprehensions.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'Tensor Comprehensions in PyTorch' author: Priya Goyal (FAIR), Nicolas Vasilache (FAIR), Oleksandr Zinenko (Inria & DI ENS), Theodoros Theodoridis (ETH Zürich), Zachary DeVito (FAIR), William S. Moses (MIT CSAIL), Sven Verdoolaege (FAIR), Andrew Adams (FAIR), Albert Cohen (Inria & DI ENS & FAIR) redirect_from: /2018/03/05/tensor-comprehensions.html +image: /assets/images/bert2.png --- Tensor Comprehensions (TC) is a tool that lowers the barrier for writing high-performance code. It generates GPU code from a simple high-level language and autotunes the code for specific input sizes. diff --git a/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md b/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md index 39aced0791ed..2e9a7421f190 100644 --- a/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md +++ b/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'PyTorch 0.4.0 Migration Guide' redirect_from: /2018/04/22/0_4_0-migration-guide.html +image: /assets/images/bert2.png --- Welcome to the migration guide for PyTorch 0.4.0. In this release we introduced [many exciting new features and critical bug fixes](https://github.com/pytorch/pytorch/releases/tag/v0.4.0), with the goal of providing users a better and cleaner interface. In this guide, we will cover the most important changes in migrating existing code from previous versions: diff --git a/_posts/2018-05-2-the-road-to-1_0.md b/_posts/2018-05-2-the-road-to-1_0.md index df93b5731974..61d6edfd151f 100644 --- a/_posts/2018-05-2-the-road-to-1_0.md +++ b/_posts/2018-05-2-the-road-to-1_0.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'The road to 1.0: production ready PyTorch' author: The PyTorch Team redirect_from: /2018/05/02/road-to-1.0.html +image: /assets/images/bert2.png --- We would like to give you a preview of the roadmap for PyTorch 1.0 , the next release of PyTorch. Over the last year, we've had 0.2, 0.3 and 0.4 transform PyTorch from a [Torch+Chainer]-like interface into something cleaner, adding double-backwards, numpy-like functions, advanced indexing and removing Variable boilerplate. At this time, we're confident that the API is in a reasonable and stable state to confidently release a 1.0. 
diff --git a/_posts/2019-05-08-model-serving-in-pyorch.md b/_posts/2019-05-08-model-serving-in-pyorch.md index c25b1c89f7ab..5a091b12b72c 100644 --- a/_posts/2019-05-08-model-serving-in-pyorch.md +++ b/_posts/2019-05-08-model-serving-in-pyorch.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'Model Serving in PyTorch' author: Jeff Smith redirect_from: /2019/05/08/model-serving-in-pyorch.html +image: /assets/images/bert2.png --- PyTorch has seen a lot of adoption in research, but people can get confused about how well PyTorch models can be taken into production. This blog post is meant to clear up any confusion people might have about the road to production in PyTorch. @@ -41,10 +42,10 @@ The above is a somewhat arbitrary breakdown of different approaches based on a s So, if you're a PyTorch user, what should you use if you want to take your models to production? -If you're on mobile or working on an embedded system like a robot, direct embedding in your application is often the right choice. +If you're on mobile or working on an embedded system like a robot, direct embedding in your application is often the right choice. For mobile specifically, your use case might be served by the ONNX export functionality. Note that ONNX, by its very nature, has limitations and doesn't support all of the functionality provided by the larger PyTorch project. -You can check out [this tutorial](https://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html) on deploying PyTorch models to mobile using ONNX to see if this path might suit your use case. +You can check out [this tutorial](https://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html) on deploying PyTorch models to mobile using ONNX to see if this path might suit your use case. That said, we've heard that there's a lot more that PyTorch users want to do on mobile, so look for more mobile-specific functionality in PyTorch in the future. For other embedded systems, like robots, running [inference on a PyTorch model from the C++ API](https://pytorch.org/tutorials/advanced/cpp_export.html) could be the right solution. @@ -52,7 +53,7 @@ If you can't use the cloud or prefer to manage all services using the same techn If you want to manage multiple models within a non-cloud service solution, there are teams developing PyTorch support in model servers like [MLFlow](https://mlflow.org/), [Kubeflow](https://www.kubeflow.org/), and [RedisAI.](https://oss.redislabs.com/redisai/) We're excited to see innovation from multiple teams building OSS model servers, and we'll continue to highlight innovation in the PyTorch ecosystem in the future. -If you can use the cloud for your application, there are several great choices for working with models in the cloud. For AWS Sagemaker, you can start find a guide to [all of the resources from AWS for working with PyTorch](https://docs.aws.amazon.com/sagemaker/latest/dg/pytorch.html), including docs on how to use the [Sagemaker Python SDK](https://sagemaker.readthedocs.io/en/stable/using_pytorch.html). You can also see [some](https://youtu.be/5h1Ot2dPi2E) [talks](https://youtu.be/qc5ZikKw9_w) we've given on using PyTorch on Sagemaker. Finally, if you happen to be using PyTorch via FastAI, then they've written a [really simple guide](https://course.fast.ai/deployment_amzn_sagemaker.html) to getting up and running on Sagemaker. +If you can use the cloud for your application, there are several great choices for working with models in the cloud. 
For AWS Sagemaker, you can start find a guide to [all of the resources from AWS for working with PyTorch](https://docs.aws.amazon.com/sagemaker/latest/dg/pytorch.html), including docs on how to use the [Sagemaker Python SDK](https://sagemaker.readthedocs.io/en/stable/using_pytorch.html). You can also see [some](https://youtu.be/5h1Ot2dPi2E) [talks](https://youtu.be/qc5ZikKw9_w) we've given on using PyTorch on Sagemaker. Finally, if you happen to be using PyTorch via FastAI, then they've written a [really simple guide](https://course.fast.ai/deployment_amzn_sagemaker.html) to getting up and running on Sagemaker. The story is similar across other major clouds. On Google Cloud, you can follow [these instructions](https://cloud.google.com/deep-learning-vm/docs/pytorch_start_instance) to get access to a Deep Learning VM with PyTorch pre-installed. On Microsoft Azure, you have a number of ways to get started from [Azure Machine Learning Service](https://azure.microsoft.com/en-us/services/machine-learning-service/) to [Azure Notebooks](https://notebooks.azure.com/pytorch/projects/tutorials) showing how to use PyTorch. diff --git a/_posts/2019-05-1-pytorch-adds-new-dev-tools.md b/_posts/2019-05-1-pytorch-adds-new-dev-tools.md index 855ef785ad0e..0588763c4814 100644 --- a/_posts/2019-05-1-pytorch-adds-new-dev-tools.md +++ b/_posts/2019-05-1-pytorch-adds-new-dev-tools.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'PyTorch adds new dev tools as it hits production scale' author: The PyTorch Team +image: /assets/images/bert2.png --- _This is a partial re-post of the original blog post on the Facebook AI Blog. The full post can be [viewed here](https://ai.facebook.com/blog/pytorch-adds-new-dev-tools-as-it-hits-production-scale/)_ diff --git a/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md b/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md index 3bdd2db84dbe..e7e74f430db7 100644 --- a/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md +++ b/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'Towards Reproducible Research with PyTorch Hub' author: Team PyTorch redirect_from: /2019/06/10/pytorch_hub.html +image: /assets/images/bert2.png --- Reproducibility is an essential requirement for many fields of research including those based on machine learning techniques. However, many machine learning publications are either not reproducible or are difficult to reproduce. With the continued growth in the number of research publications, including tens of thousands of papers now hosted on arXiv and submissions to conferences at an all time high, research reproducibility is more important than ever. While many of these publications are accompanied by code as well as trained models which is helpful but still leaves a number of steps for users to figure out for themselves. diff --git a/_posts/2019-07-18-pytorch-ecosystem.md b/_posts/2019-07-18-pytorch-ecosystem.md index 3b87f2f10d0f..b0cf03ad50a7 100644 --- a/_posts/2019-07-18-pytorch-ecosystem.md +++ b/_posts/2019-07-18-pytorch-ecosystem.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'PyTorch Adds New Ecosystem Projects for Encrypted AI and Quantum Computing, Expands PyTorch Hub' author: Team PyTorch +image: /assets/images/bert2.png --- The PyTorch ecosystem includes projects, tools, models and libraries from a broad community of researchers in academia and industry, application developers, and ML engineers. 
The goal of this ecosystem is to support, accelerate, and aid in your exploration with PyTorch and help you push the state of the art, no matter what field you are exploring. Similarly, we are expanding the recently launched PyTorch Hub to further help you discover and reproduce the latest research. diff --git a/_posts/2019-07-23-mapillary-research.md b/_posts/2019-07-23-mapillary-research.md index ee01ffcbd9cc..e15f87708e40 100644 --- a/_posts/2019-07-23-mapillary-research.md +++ b/_posts/2019-07-23-mapillary-research.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'Mapillary Research: Seamless Scene Segmentation and In-Place Activated BatchNorm' author: Lorenzo Porzi, Mapillary redirect_from: /2019/07/23/mapillary-research.html +image: /assets/images/bert2.png --- With roads in developed countries like the US changing up to 15% annually, Mapillary addresses a growing demand for keeping maps updated by combining images from any camera into a 3D visualization of the world. Mapillary's independent and collaborative approach enables anyone to collect, share, and use street-level images for improving maps, developing cities, and advancing the automotive industry. diff --git a/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md b/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md index 5e8ce05d52f8..06d443b758a5 100644 --- a/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md +++ b/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'New Releases: PyTorch 1.2, torchtext 0.4, torchaudio 0.3, and torchvision 0.4' author: Team PyTorch redirect_from: /2019/08/06/pytorch_aug2019_releases.html +image: /assets/images/bert2.png --- Since the release of PyTorch 1.0, we’ve seen the community expand to add new tools, contribute to a growing set of models available in the PyTorch Hub, and continually increase usage in both research and production. diff --git a/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md b/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md index 3fa1d06bd88a..95838fe3a275 100644 --- a/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md +++ b/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'PyTorch 1.3 adds mobile, privacy, quantization, and named tensors' author: Team PyTorch +image: /assets/images/bert2.png --- PyTorch continues to gain momentum because of its focus on meeting the needs of researchers, its streamlined workflow for production use, and most of all because of the enthusiastic support it has received from the AI community. PyTorch citations in papers on ArXiv [grew 194 percent in the first half of 2019 alone, as noted by O’Reilly](https://www.oreilly.com/ideas/one-simple-graphic-researchers-love-pytorch-and-tensorflow?fbclid=IwAR3kYmlyD7zky37IYFu0cafQn7yemhl8P-7MNyB30z0q5RDzxcTOrP8kxDk), and the number of contributors to the platform has grown more than 50 percent over the last year, to nearly 1,200. Facebook, Microsoft, Uber, and other organizations across industries are increasingly using it as the foundation for their most important machine learning (ML) research and production workloads. 
diff --git a/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md b/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md index 2dd9682c9f43..db63192b28bc 100644 --- a/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md +++ b/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'OpenMined and PyTorch partner to launch fellowship funding for privacy-preserving ML community' author: Andrew Trask (OpenMined/U.Oxford), Shubho Sengupta, Laurens van der Maaten, Joe Spisak excerpt: Many applications of machine learning (ML) pose a range of security and privacy challenges. +image: /assets/images/bert2.png ---
diff --git a/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md b/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md index 0a13979349de..92f1eace1811 100644 --- a/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md +++ b/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'PyTorch adds new tools and libraries, welcomes Preferred Networks to its community' author: Team PyTorch +image: /assets/images/bert2.png --- PyTorch continues to be used for the latest state-of-the-art research on display at the NeurIPS conference next week, making up nearly [70% of papers](https://chillee.github.io/pytorch-vs-tensorflow/) that cite a framework. In addition, we’re excited to welcome Preferred Networks, the maintainers of the Chainer framework, to the PyTorch community. Their teams are moving fully over to PyTorch for developing their ML capabilities and services. diff --git a/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md b/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md index a610776b0c2d..d218f78955f5 100644 --- a/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md +++ b/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'Stochastic Weight Averaging in PyTorch' author: Pavel Izmailov and Andrew Gordon Wilson redirect_from: /2019/04/29/road-to-1.0.html +image: /assets/images/bert2.png --- In this blogpost we describe the recently proposed Stochastic Weight Averaging (SWA) technique [1, 2], and its new implementation in [`torchcontrib`](https://github.com/pytorch/contrib). SWA is a simple procedure that improves generalization in deep learning over Stochastic Gradient Descent (SGD) at no additional cost, and can be used as a drop-in replacement for any other optimizer in PyTorch. SWA has a wide range of applications and features: diff --git a/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md b/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md index ce4d7e255a42..7e7274e5b86e 100644 --- a/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md +++ b/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md @@ -3,11 +3,12 @@ layout: blog_detail title: "Optimizing CUDA Recurrent Neural Networks with TorchScript" author: "The PyTorch Team" date: 2019-05-01 8:00:00 -0500 +image: /assets/images/bert2.png --- -This week, we officially released PyTorch 1.1, a large feature update to PyTorch 1.0. One of the new features we've added is better support for fast, custom Recurrent Neural Networks (fastrnns) with TorchScript (the PyTorch JIT) (https://pytorch.org/docs/stable/jit.html). +This week, we officially released PyTorch 1.1, a large feature update to PyTorch 1.0. One of the new features we've added is better support for fast, custom Recurrent Neural Networks (fastrnns) with TorchScript (the PyTorch JIT) (https://pytorch.org/docs/stable/jit.html). -RNNs are popular models that have shown good performance on a variety of NLP tasks that come in different shapes and sizes. 
PyTorch implements a number of the most popular ones, the [Elman RNN](https://pytorch.org/docs/master/nn.html?highlight=rnn#torch.nn.RNN), [GRU](https://pytorch.org/docs/master/nn.html?highlight=gru#torch.nn.GRU), and [LSTM](https://pytorch.org/docs/master/nn.html?highlight=lstm#torch.nn.LSTM) as well as multi-layered and bidirectional variants. +RNNs are popular models that have shown good performance on a variety of NLP tasks that come in different shapes and sizes. PyTorch implements a number of the most popular ones, the [Elman RNN](https://pytorch.org/docs/master/nn.html?highlight=rnn#torch.nn.RNN), [GRU](https://pytorch.org/docs/master/nn.html?highlight=gru#torch.nn.GRU), and [LSTM](https://pytorch.org/docs/master/nn.html?highlight=lstm#torch.nn.LSTM) as well as multi-layered and bidirectional variants. However, many users want to implement their own custom RNNs, taking ideas from recent literature. Applying [Layer Normalization](https://arxiv.org/abs/1607.06450) to LSTMs is one such use case. Because the PyTorch CUDA LSTM implementation uses a fused kernel, it is difficult to insert normalizations or even modify the base LSTM implementation. Many users have turned to writing custom implementations using standard PyTorch operators, but such code suffers from high overhead: most PyTorch operations launch at least one kernel on the GPU and RNNs generally run many operations due to their recurrent nature. However, we can apply TorchScript to fuse operations and optimize our code automatically, launching fewer, more optimized kernels on the GPU. @@ -21,7 +22,7 @@ We are constantly improving our infrastructure on trying to make the performance 1. If the customized operations are all element-wise, that's great because you can get the benefits of the PyTorch JIT's operator fusion automatically! -2. If you have more complex operations (e.g. reduce ops mixed with element-wise ops), consider grouping the reduce operations and element-wise ops separately in order to fuse the element-wise operations into a single fusion group. +2. If you have more complex operations (e.g. reduce ops mixed with element-wise ops), consider grouping the reduce operations and element-wise ops separately in order to fuse the element-wise operations into a single fusion group. 3. If you want to know about what has been fused in your custom RNN, you can inspect the operation's optimized graph by using `graph_for` . Using `LSTMCell` as an example: @@ -87,7 +88,7 @@ We are constantly improving our infrastructure on trying to make the performance return (%hy, %4, %cy, %outgate.1, %cellgate.1, %forgetgate.1, %ingate.1) ``` -From the above graph we can see that it has a `prim::FusionGroup_0` subgraph that is fusing all element-wise operations in LSTMCell (transpose and matrix multiplication are not element-wise ops). Some graph nodes might be hard to understand in the first place but we will explain some of them in the optimization section, we also omitted some long verbose operators in this post that is there just for correctness. +From the above graph we can see that it has a `prim::FusionGroup_0` subgraph that is fusing all element-wise operations in LSTMCell (transpose and matrix multiplication are not element-wise ops). Some graph nodes might be hard to understand in the first place but we will explain some of them in the optimization section, we also omitted some long verbose operators in this post that is there just for correctness. 
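For readers who want to reproduce a dump like the one above, a minimal sketch follows. It assumes the `LSTMCell` ScriptModule defined later in this post, the input shapes are the illustrative benchmark configuration used in the Optimizations section, and the exact way `graph_for` is invoked may vary slightly between PyTorch releases.

```python
import torch

# Illustrative setup: mini-batch of 64, input and hidden size of 512,
# matching the benchmark configuration used later in this post.
cell = LSTMCell(512, 512).cuda()
x = torch.randn(64, 512, device="cuda")
state = (torch.randn(64, 512, device="cuda"),
         torch.randn(64, 512, device="cuda"))

# Run the cell once so the JIT records shapes and applies its optimization
# passes, then print the optimized graph to see the prim::FusionGroup nodes.
cell(x, state)
print(cell.graph_for(x, state))
```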
## Variable-length sequences best practices @@ -108,7 +109,7 @@ Of course, `output` may have some garbage data in the padded regions; use `lengt ## Optimizations -We will now explain the optimizations performed by the PyTorch JIT to speed up custom RNNs. We will use a simple custom LSTM model in TorchScript to illustrate the optimizations, but many of these are general and apply to other RNNs. +We will now explain the optimizations performed by the PyTorch JIT to speed up custom RNNs. We will use a simple custom LSTM model in TorchScript to illustrate the optimizations, but many of these are general and apply to other RNNs. To illustrate the optimizations we did and how we get benefits from those optimizations, we will run a simple custom LSTM model written in TorchScript (you can refer the code in the custom_lstm.py or the below code snippets) and time our changes. @@ -119,10 +120,10 @@ input_size = 512 hidden_size = 512 mini_batch = 64 numLayers = 1 -seq_length = 100 +seq_length = 100 ``` -The most important thing PyTorch JIT did is to compile the python program to a PyTorch JIT IR, which is an intermediate representation used to model the program's graph structure. This IR can then benefit from whole program optimization, hardware acceleration and overall has the potential to provide large computation gains. In this example, we run the initial TorchScript model with only compiler optimization passes that are provided by the JIT, including common subexpression elimination, constant pooling, constant propagation, dead code elimination and some peephole optimizations. We run the model training for 100 times after warm up and average the training time. The initial results for model forward time is around 27ms and backward time is around 64ms, which is a bit far away from what PyTorch cuDNN LSTM provided. Next we will explain the major optimizations we did on how we improve the performance on training or inferencing, starting with LSTMCell and LSTMLayer, and some misc optimizations. +The most important thing PyTorch JIT did is to compile the python program to a PyTorch JIT IR, which is an intermediate representation used to model the program's graph structure. This IR can then benefit from whole program optimization, hardware acceleration and overall has the potential to provide large computation gains. In this example, we run the initial TorchScript model with only compiler optimization passes that are provided by the JIT, including common subexpression elimination, constant pooling, constant propagation, dead code elimination and some peephole optimizations. We run the model training for 100 times after warm up and average the training time. The initial results for model forward time is around 27ms and backward time is around 64ms, which is a bit far away from what PyTorch cuDNN LSTM provided. Next we will explain the major optimizations we did on how we improve the performance on training or inferencing, starting with LSTMCell and LSTMLayer, and some misc optimizations. ### LSTM Cell (forward) @@ -159,11 +160,11 @@ class LSTMCell(jit.ScriptModule): ``` -This graph representation (IR) that TorchScript generated enables several optimizations and scalable computations. In addition to the typical compiler optimizations that we could do (CSE, constant propagation, etc. ) we can also run other IR transformations to make our code run faster. +This graph representation (IR) that TorchScript generated enables several optimizations and scalable computations. 
In addition to the typical compiler optimizations that we could do (CSE, constant propagation, etc. ) we can also run other IR transformations to make our code run faster. * Element-wise operator fusion. PyTorch JIT will automatically fuse element-wise ops, so when you have adjacent operators that are all element-wise, JIT will automatically group all those operations together into a single FusionGroup, this FusionGroup can then be launched with a single GPU/CPU kernel and performed in one pass. This avoids expensive memory reads and writes for each operation. * Reordering chunks and pointwise ops to enable more fusion. An LSTM cell adds gates together (a pointwise operation), and then chunks the gates into four pieces: the ifco gates. Then, it performs pointwise operations on the ifco gates like above. This leads to two fusion groups in practice: one fusion group for the element-wise ops pre-chunk, and one group for the element-wise ops post-chunk. - The interesting thing to note here is that pointwise operations commute with `torch.chunk`: Instead of performing pointwise ops on some input tensors and chunking the output, we can chunk the input tensors and then perform the same pointwise ops on the output tensors. By moving the chunk to before the first fusion group, we can merge the first and second fusion groups into one big group. + The interesting thing to note here is that pointwise operations commute with `torch.chunk`: Instead of performing pointwise ops on some input tensors and chunking the output, we can chunk the input tensors and then perform the same pointwise ops on the output tensors. By moving the chunk to before the first fusion group, we can merge the first and second fusion groups into one big group.
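The commutation claim above is easy to sanity-check numerically. A small self-contained sketch, with gate sizes chosen to mirror the ifco layout but otherwise arbitrary:

```python
import torch

x = torch.randn(64, 4 * 512)
y = torch.randn(64, 4 * 512)

# Pointwise op first, then chunk into the four gate slices...
after = torch.sigmoid(x + y).chunk(4, dim=1)

# ...versus chunk first, then the same pointwise op on each slice.
before = [torch.sigmoid(xc + yc)
          for xc, yc in zip(x.chunk(4, dim=1), y.chunk(4, dim=1))]

# Both orderings produce the same gate tensors, which is what lets the JIT
# move the chunk ahead of the first fusion group.
print(all(torch.allclose(a, b) for a, b in zip(after, before)))  # True
```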
@@ -171,7 +172,7 @@ This graph representation (IR) that TorchScript generated enables several optimi * Tensor creation on the CPU is expensive, but there is ongoing work to make it faster. At this point, a LSTMCell runs three CUDA kernels: two `gemm` kernels and one for the single pointwise group. One of the things we noticed was that there was a large gap between the finish of the second `gemm` and the start of the single pointwise group. This gap was a period of time when the GPU was idling around and not doing anything. Looking into it more, we discovered that the problem was that `torch.chunk` constructs new tensors and that tensor construction was not as fast as it could be. Instead of constructing new Tensor objects, we taught the fusion compiler how to manipulate a data pointer and strides to do the `torch.chunk` before sending it into the fused kernel, shrinking the amount of idle time between the second gemm and the launch of the element-wise fusion group. This give us around 1.2x increase speed up on the LSTM forward pass. -By doing the above tricks, we are able to fuse the almost all `LSTMCell` forward graph (except the two gemm kernels) into a single fusion group, which corresponds to the `prim::FusionGroup_0` in the above IR graph. It will then be launched into a single fused kernel for execution. With these optimizations the model performance improves significantly with average forward time reduced by around 17ms (1.7x speedup) to 10ms, and average backward time reduce by 37ms to 27ms (1.37x speed up). +By doing the above tricks, we are able to fuse the almost all `LSTMCell` forward graph (except the two gemm kernels) into a single fusion group, which corresponds to the `prim::FusionGroup_0` in the above IR graph. It will then be launched into a single fused kernel for execution. With these optimizations the model performance improves significantly with average forward time reduced by around 17ms (1.7x speedup) to 10ms, and average backward time reduce by 37ms to 27ms (1.37x speed up). ### LSTM Layer (forward) @@ -195,31 +196,31 @@ class LSTMLayer(jit.ScriptModule): We did several tricks on the IR we generated for TorchScript LSTM to boost the performance, some example optimizations we did: * Loop Unrolling: We automatically unroll loops in the code (for big loops, we unroll a small subset of it), which then empowers us to do further optimizations on the for loops control flow. For example, the fuser can fuse together operations across iterations of the loop body, which results in a good performance improvement for control flow intensive models like LSTMs. -* Batch Matrix Multiplication: For RNNs where the input is pre-multiplied (i.e. the model has a lot of matrix multiplies with the same LHS or RHS), we can efficiently batch those operations together into a single matrix multiply while chunking the outputs to achieve equivalent semantics. +* Batch Matrix Multiplication: For RNNs where the input is pre-multiplied (i.e. the model has a lot of matrix multiplies with the same LHS or RHS), we can efficiently batch those operations together into a single matrix multiply while chunking the outputs to achieve equivalent semantics. -By applying these techniques, we reduced our time in the forward pass by an additional 1.6ms to 8.4ms (1.2x speed up) and timing in backward by 7ms to around 20ms (1.35x speed up). 
+By applying these techniques, we reduced our time in the forward pass by an additional 1.6ms to 8.4ms (1.2x speed up) and timing in backward by 7ms to around 20ms (1.35x speed up). ### LSTM Layer (backward) * “Tree” Batch Matrix Muplication: It is often the case that a single weight is reused multiple times in the LSTM backward graph, forming a tree where the leaves are matrix multiplies and nodes are adds. These nodes can be combined together by concatenating the LHSs and RHSs in different dimensions, then computed as a single matrix multiplication. The formula of equivalence can be denoted as follows: - + $L1 * R1 + L2 * R2 = torch.cat((L1, L2), dim=1) * torch.cat((R1, R2), dim=0)$ - -* Autograd is a critical component of what makes PyTorch such an elegant ML framework. As such, we carried this through to PyTorch JIT, but using a new **Automatic Differentiation** (AD) mechanism that works on the IR level. JIT automatic differentiation will slice the forward graph into symbolically differentiable subgraphs, and generate backwards nodes for those subgraphs. Taking the above IR as an example, we group the graph nodes into a single `prim::DifferentiableGraph_0` for the operations that has AD formulas. For operations that have not been added to AD formulas, we will fall back to Autograd during execution. + +* Autograd is a critical component of what makes PyTorch such an elegant ML framework. As such, we carried this through to PyTorch JIT, but using a new **Automatic Differentiation** (AD) mechanism that works on the IR level. JIT automatic differentiation will slice the forward graph into symbolically differentiable subgraphs, and generate backwards nodes for those subgraphs. Taking the above IR as an example, we group the graph nodes into a single `prim::DifferentiableGraph_0` for the operations that has AD formulas. For operations that have not been added to AD formulas, we will fall back to Autograd during execution. * Optimizing the backwards path is hard, and the implicit broadcasting semantics make the optimization of automatic differentiation harder. PyTorch makes it convenient to write tensor operations without worrying about the shapes by broadcasting the tensors for you. For performance, the painful point in backward is that we need to have a summation for such kind of broadcastable operations. This results in the derivative of every broadcastable op being followed by a summation. Since we cannot currently fuse reduce operations, this causes FusionGroups to break into multiple small groups leading to bad performance. To deal with this, refer to this great [post](http://lernapparat.de/fast-lstm-pytorch/) written by Thomas Viehmann. ### Misc Optimizations * In addition to the steps laid about above, we also eliminated overhead between CUDA kernel launches and unnecessary tensor allocations. One example is when you do a tensor device look up. This can provide some poor performance initially with a lot of unnecessary allocations. When we remove these this results in a reduction from milliseconds to nanoseconds between kernel launches. -* Lastly, there might be normalization applied in the custom LSTMCell like LayerNorm. Since LayerNorm and other normalization ops contains reduce operations, it is hard to fuse it in its entirety. Instead, we automatically decompose Layernorm to a statistics computation (reduce operations) + element-wise transformations, and then fuse those element-wise parts together. 
As of this post, there are some limitations on our auto differentiation and graph fuser infrastructure which limits the current support to inference mode only. We plan to add backward support in a future release. +* Lastly, there might be normalization applied in the custom LSTMCell like LayerNorm. Since LayerNorm and other normalization ops contains reduce operations, it is hard to fuse it in its entirety. Instead, we automatically decompose Layernorm to a statistics computation (reduce operations) + element-wise transformations, and then fuse those element-wise parts together. As of this post, there are some limitations on our auto differentiation and graph fuser infrastructure which limits the current support to inference mode only. We plan to add backward support in a future release. -With the above optimizations on operation fusion, loop unrolling, batch matrix multiplication and some misc optimizations, we can see a clear performance increase on our custom TorchScript LSTM forward and backward from the following figure: +With the above optimizations on operation fusion, loop unrolling, batch matrix multiplication and some misc optimizations, we can see a clear performance increase on our custom TorchScript LSTM forward and backward from the following figure:
-There are a number of additional optimizations that we did not cover in this post. In addition to the ones laid out in this post, we now see that our custom LSTM forward pass is on par with cuDNN. We are also working on optimizing backward more and expect to see improvements in future releases. Besides the speed that TorchScript provides, we introduced a much more flexible API that enable you to hand draft a lot more custom RNNs, which cuDNN could not provide. +There are a number of additional optimizations that we did not cover in this post. In addition to the ones laid out in this post, we now see that our custom LSTM forward pass is on par with cuDNN. We are also working on optimizing backward more and expect to see improvements in future releases. Besides the speed that TorchScript provides, we introduced a much more flexible API that enable you to hand draft a lot more custom RNNs, which cuDNN could not provide. diff --git a/_posts/2019-5-22-torchvision03.md b/_posts/2019-5-22-torchvision03.md index eb807b4394b3..409f0ffc766f 100644 --- a/_posts/2019-5-22-torchvision03.md +++ b/_posts/2019-5-22-torchvision03.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'torchvision 0.3: segmentation, detection models, new datasets and more..' author: Francisco Massa redirect_from: /2019/05/23/torchvision03.html +image: /assets/images/bert2.png --- PyTorch domain libraries like torchvision provide convenient access to common datasets and models that can be used to quickly create a state-of-the-art baseline. Moreover, they also provide common abstractions to reduce boilerplate code that users might have to otherwise repeatedly write. The torchvision 0.3 release brings several new features including models for semantic segmentation, object detection, instance segmentation, and person keypoint detection, as well as custom C++ / CUDA ops specific to computer vision. diff --git a/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md b/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md index 3a45e36b35fa..be064e484b36 100644 --- a/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md +++ b/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'Introducing native PyTorch automatic mixed precision for faster training on NVIDIA GPUs' author: Mengdi Huang, Chetan Tekur, Michael Carilli +image: /assets/images/bert2.png --- Most deep learning frameworks, including PyTorch, train with 32-bit floating point (FP32) arithmetic by default. However this is not essential to achieve full accuracy for many deep learning models. In 2017, NVIDIA researchers developed a methodology for [mixed-precision training](https://developer.nvidia.com/blog/mixed-precision-training-deep-neural-networks/), which combined [single-precision](https://blogs.nvidia.com/blog/2019/11/15/whats-the-difference-between-single-double-multi-and-mixed-precision-computing/) (FP32) with half-precision (e.g. FP16) format when training a network, and achieved the same accuracy as FP32 training using the same hyperparameters, with additional performance benefits on NVIDIA GPUs: @@ -9,7 +10,7 @@ Most deep learning frameworks, including PyTorch, train with 32-bit floating poi * Shorter training time; * Lower memory requirements, enabling larger batch sizes, larger models, or larger inputs. 
-In order to streamline the user experience of training in mixed precision for researchers and practitioners, NVIDIA developed [Apex](https://developer.nvidia.com/blog/apex-pytorch-easy-mixed-precision-training/) in 2018, which is a lightweight PyTorch extension with [Automatic Mixed Precision](https://developer.nvidia.com/automatic-mixed-precision) (AMP) feature. This feature enables automatic conversion of certain GPU operations from FP32 precision to mixed precision, thus improving performance while maintaining accuracy. +In order to streamline the user experience of training in mixed precision for researchers and practitioners, NVIDIA developed [Apex](https://developer.nvidia.com/blog/apex-pytorch-easy-mixed-precision-training/) in 2018, which is a lightweight PyTorch extension with [Automatic Mixed Precision](https://developer.nvidia.com/automatic-mixed-precision) (AMP) feature. This feature enables automatic conversion of certain GPU operations from FP32 precision to mixed precision, thus improving performance while maintaining accuracy. For the PyTorch 1.6 release, developers at NVIDIA and Facebook moved mixed precision functionality into PyTorch core as the AMP package, [torch.cuda.amp](https://pytorch.org/docs/stable/amp.html). `torch.cuda.amp` is more flexible and intuitive compared to `apex.amp`. Some of `apex.amp`'s known pain points that `torch.cuda.amp` has been able to fix: @@ -22,7 +23,7 @@ For the PyTorch 1.6 release, developers at NVIDIA and Facebook moved mixed preci * torch.cuda.amp.autocast() has no effect outside regions where it's enabled, so it should serve cases that formerly struggled with multiple calls to [apex.amp.initialize()](https://github.com/NVIDIA/apex/issues/439) (including [cross-validation)](https://github.com/NVIDIA/apex/issues/392#issuecomment-610038073) without difficulty. Multiple convergence runs in the same script should each use a fresh [GradScaler instance](https://github.com/NVIDIA/apex/issues/439#issuecomment-610028282), but GradScalers are lightweight and self-contained so that's not a problem. * Sparse gradient support -With AMP being added to PyTorch core, we have started the process of deprecating `apex.amp.` We have moved `apex.amp` to maintenance mode and will support customers using `apex.amp.` However, we highly encourage `apex.amp` customers to transition to using `torch.cuda.amp` from PyTorch Core. +With AMP being added to PyTorch core, we have started the process of deprecating `apex.amp.` We have moved `apex.amp` to maintenance mode and will support customers using `apex.amp.` However, we highly encourage `apex.amp` customers to transition to using `torch.cuda.amp` from PyTorch Core. 
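As a small illustration of the autocast locality mentioned in the list above, the sketch below (shapes and the CUDA device are illustrative assumptions) shows that only ops inside an enabled autocast region run in reduced precision:

```python
import torch

a = torch.randn(8, 8, device="cuda")
b = torch.randn(8, 8, device="cuda")

with torch.cuda.amp.autocast():
    c = a @ b                                   # inside the region: float16 on CUDA
    with torch.cuda.amp.autocast(enabled=False):
        d = a @ b                               # locally disabled: back to float32

e = a @ b                                       # outside the region: float32, unaffected

print(c.dtype, d.dtype, e.dtype)                # torch.float16 torch.float32 torch.float32
```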
# Example Walkthrough Please see official docs for usage: @@ -32,33 +33,33 @@ Please see official docs for usage: Example: ```python -import torch -# Creates once at the beginning of training -scaler = torch.cuda.amp.GradScaler() - -for data, label in data_iter: - optimizer.zero_grad() - # Casts operations to mixed precision - with torch.cuda.amp.autocast(): - loss = model(data) - - # Scales the loss, and calls backward() - # to create scaled gradients - scaler.scale(loss).backward() - - # Unscales gradients and calls - # or skips optimizer.step() - scaler.step(optimizer) - - # Updates the scale for next iteration - scaler.update() +import torch +# Creates once at the beginning of training +scaler = torch.cuda.amp.GradScaler() + +for data, label in data_iter: + optimizer.zero_grad() + # Casts operations to mixed precision + with torch.cuda.amp.autocast(): + loss = model(data) + + # Scales the loss, and calls backward() + # to create scaled gradients + scaler.scale(loss).backward() + + # Unscales gradients and calls + # or skips optimizer.step() + scaler.step(optimizer) + + # Updates the scale for next iteration + scaler.update() ``` # Performance Benchmarks In this section, we discuss the accuracy and performance of mixed precision training with AMP on the latest NVIDIA GPU A100 and also previous generation V100 GPU. The mixed precision performance is compared to FP32 performance, when running Deep Learning workloads in the [NVIDIA pytorch:20.06-py3 container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch?ncid=partn-52193#cid=ngc01_partn_en-us) from NGC. ## Accuracy: AMP (FP16), FP32 -The advantage of using AMP for Deep Learning training is that the models converge to the similar final accuracy while providing improved training performance. To illustrate this point, for [Resnet 50 v1.5 training](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnet50v1.5#training-accuracy-nvidia-dgx-a100-8x-a100-40gb), we see the following accuracy results where higher is better. Please note that the below accuracy numbers are sample numbers that are subject to run to run variance of up to 0.4%. Accuracy numbers for other models including BERT, Transformer, ResNeXt-101, Mask-RCNN, DLRM can be found at [NVIDIA Deep Learning Examples Github](https://github.com/NVIDIA/DeepLearningExamples). +The advantage of using AMP for Deep Learning training is that the models converge to the similar final accuracy while providing improved training performance. To illustrate this point, for [Resnet 50 v1.5 training](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnet50v1.5#training-accuracy-nvidia-dgx-a100-8x-a100-40gb), we see the following accuracy results where higher is better. Please note that the below accuracy numbers are sample numbers that are subject to run to run variance of up to 0.4%. Accuracy numbers for other models including BERT, Transformer, ResNeXt-101, Mask-RCNN, DLRM can be found at [NVIDIA Deep Learning Examples Github](https://github.com/NVIDIA/DeepLearningExamples). Training accuracy: NVIDIA DGX A100 (8x A100 40GB) @@ -78,7 +79,7 @@ Training accuracy: NVIDIA DGX A100 (8x A100 40GB) Training accuracy: NVIDIA DGX-1 (8x V100 16GB) - + @@ -104,7 +105,7 @@ Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
-## Speedup Performance: +## Speedup Performance: ### FP16 on NVIDIA V100 vs. FP32 on V100 AMP with FP16 is the most performant option for DL training on the V100. In Table 1, we can observe that for various models, AMP on V100 provides a speedup of 1.5x to 5.5x over FP32 on V100 while converging to the same final accuracy. @@ -124,8 +125,8 @@ AMP with FP16 remains the most performant option for DL training on the A100. In *Figure 3. Performance of mixed precision training on NVIDIA 8xA100 vs. 8xV100 GPU. Bars represent the speedup factor of A100 over V100. The higher the better.* # Call to action -AMP provides a healthy speedup for Deep Learning training workloads on Nvidia Tensor Core GPUs, especially on the latest Ampere generation A100 GPUs. You can start experimenting with AMP enabled models and model scripts for A100, V100, T4 and other GPUs available at NVIDIA deep learning [examples](https://github.com/NVIDIA/DeepLearningExamples). NVIDIA PyTorch with native AMP support is available from the [PyTorch NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch?ncid=partn-52193#cid=ngc01_partn_en-us) version 20.06. We highly encourage existing `apex.amp` customers to transition to using `torch.cuda.amp` from PyTorch Core available in the latest [PyTorch 1.6 release](https://pytorch.org/blog/pytorch-1.6-released/). +AMP provides a healthy speedup for Deep Learning training workloads on Nvidia Tensor Core GPUs, especially on the latest Ampere generation A100 GPUs. You can start experimenting with AMP enabled models and model scripts for A100, V100, T4 and other GPUs available at NVIDIA deep learning [examples](https://github.com/NVIDIA/DeepLearningExamples). NVIDIA PyTorch with native AMP support is available from the [PyTorch NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch?ncid=partn-52193#cid=ngc01_partn_en-us) version 20.06. We highly encourage existing `apex.amp` customers to transition to using `torch.cuda.amp` from PyTorch Core available in the latest [PyTorch 1.6 release](https://pytorch.org/blog/pytorch-1.6-released/). + + + - - - diff --git a/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md b/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md index 8f74a50570bd..bff167952881 100644 --- a/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md +++ b/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md @@ -2,14 +2,14 @@ layout: blog_detail title: 'Microsoft becomes maintainer of the Windows version of PyTorch' author: Maxim Lukiyanov - Principal PM at Microsoft, Emad Barsoum - Group EM at Microsoft, Guoliang Hua - Principal EM at Microsoft, Nikita Shulga - Tech Lead at Facebook, Geeta Chauhan - PE Lead at Facebook, Chris Gottbrath - Technical PM at Facebook, Jiachen Pu - Engineer at Facebook - +image: /assets/images/bert2.png --- Along with the PyTorch 1.6 release, we are excited to announce that Microsoft has expanded its participation in the PyTorch community and is taking ownership of the development and maintenance of the PyTorch build for Windows. According to the latest [Stack Overflow developer survey](https://insights.stackoverflow.com/survey/2020#technology-developers-primary-operating-systems), Windows remains the primary operating system for the developer community (46% Windows vs 28% MacOS). 
[Jiachen Pu](https://github.com/peterjc123) initially made a heroic effort to add support for PyTorch on Windows, but due to limited resources, Windows support for PyTorch has lagged behind other platforms. Lack of test coverage resulted in unexpected issues popping up every now and then. Some of the core tutorials, meant for new users to learn and adopt PyTorch, would fail to run. The installation experience was also not as smooth, with the lack of official PyPI support for PyTorch on Windows. Lastly, some of the PyTorch functionality was simply not available on the Windows platform, such as the TorchAudio domain library and distributed training support. To help alleviate this pain, Microsoft is happy to bring its Windows expertise to the table and bring PyTorch on Windows to its best possible self. -In the PyTorch 1.6 release, we have improved the core quality of the Windows build by bringing test coverage up to par with Linux for core PyTorch and its domain libraries and by automating tutorial testing. Thanks to the broader PyTorch community, which contributed TorchAudio support to Windows, we were able to add test coverage to all three domain libraries: TorchVision, TorchText and TorchAudio. In subsequent releases of PyTorch, we will continue improving the Windows experience based on community feedback and requests. So far, the feedback we received from the community points to distributed training support and a better installation experience using pip as the next areas of improvement. +In the PyTorch 1.6 release, we have improved the core quality of the Windows build by bringing test coverage up to par with Linux for core PyTorch and its domain libraries and by automating tutorial testing. Thanks to the broader PyTorch community, which contributed TorchAudio support to Windows, we were able to add test coverage to all three domain libraries: TorchVision, TorchText and TorchAudio. In subsequent releases of PyTorch, we will continue improving the Windows experience based on community feedback and requests. So far, the feedback we received from the community points to distributed training support and a better installation experience using pip as the next areas of improvement. In addition to the native Windows experience, Microsoft released a preview adding [GPU compute support to Windows Subsystem for Linux (WSL) 2](https://blogs.windows.com/windowsdeveloper/2020/06/17/gpu-accelerated-ml-training-inside-the-windows-subsystem-for-linux/) distros, with a focus on enabling AI and ML developer workflows. WSL is designed for developers that want to run any Linux based tools directly on Windows. This preview enables valuable scenarios for a variety of frameworks and Python packages that utilize [NVIDIA CUDA](https://developer.nvidia.com/cuda/wsl) for acceleration and only support Linux. This means WSL customers using the preview can run native Linux based PyTorch applications on Windows unmodified without the need for a traditional virtual machine or a dual boot setup. 
diff --git a/_posts/2020-07-28-pytorch-feature-classification-changes.md b/_posts/2020-07-28-pytorch-feature-classification-changes.md index 1ace6ef10388..83615f83c379 100644 --- a/_posts/2020-07-28-pytorch-feature-classification-changes.md +++ b/_posts/2020-07-28-pytorch-feature-classification-changes.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'PyTorch feature classification changes' author: Team PyTorch +image: /assets/images/bert2.png --- Traditionally features in PyTorch were classified as either stable or experimental with an implicit third option of testing bleeding edge features by building master or through installing nightly builds (available via prebuilt whls). This has, in a few cases, caused some confusion around the level of readiness, commitment to the feature and backward compatibility that can be expected from a user perspective. Moving forward, we’d like to better classify the 3 types of features as well as define explicitly here what each mean from a user perspective. diff --git a/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md b/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md index 0f5f5873e26c..0c4f3f14c799 100644 --- a/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md +++ b/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'Efficient PyTorch I/O library for Large Datasets, Many Files, Many GPUs' author: Alex Aizman, Gavin Maltby, Thomas Breuel +image: /assets/images/bert2.png --- Data sets are growing bigger every day and GPUs are getting faster. This means there are more data sets for deep learning researchers and engineers to train and validate their models. @@ -20,7 +21,7 @@ However, working with the large amount of data sets presents a number of challen * **Shuffling and Augmentation:** training data needs to be shuffled and augmented prior to training. * **Scalability:** users often want to develop and test on small datasets and then rapidly scale up to large datasets. -Traditional local and network file systems, and even object storage servers, are not designed for these kinds of applications. [The WebDataset I/O library](https://github.com/tmbdev/webdataset) for PyTorch, together with the optional [AIStore server](https://github.com/NVIDIA/aistore) and [Tensorcom](https://github.com/NVlabs/tensorcom) RDMA libraries, provide an efficient, simple, and standards-based solution to all these problems. The library is simple enough for day-to-day use, is based on mature open source standards, and is easy to migrate to from existing file-based datasets. +Traditional local and network file systems, and even object storage servers, are not designed for these kinds of applications. [The WebDataset I/O library](https://github.com/tmbdev/webdataset) for PyTorch, together with the optional [AIStore server](https://github.com/NVIDIA/aistore) and [Tensorcom](https://github.com/NVlabs/tensorcom) RDMA libraries, provide an efficient, simple, and standards-based solution to all these problems. The library is simple enough for day-to-day use, is based on mature open source standards, and is easy to migrate to from existing file-based datasets. Using WebDataset is simple and requires little effort, and it will let you scale up the same code from running local experiments to using hundreds of GPUs on clusters or in the cloud with linearly scalable performance. 
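To make the shape of the API concrete before the walkthrough below, here is a rough sketch of a WebDataset input pipeline. It is not the post's own example: the shard URL is a placeholder, and the exact class and method names have shifted across `webdataset` releases.

```python
# Rough sketch of a WebDataset pipeline; the shard location is hypothetical.
import torch
import webdataset as wds
from torchvision import transforms

shards = "https://example.com/imagenet-train-{000000..000146}.tar"  # placeholder shards

preprocess = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.ToTensor(),
])

dataset = (
    wds.WebDataset(shards)             # shards are ordinary POSIX tar files
    .shuffle(1000)                     # shuffle samples within a rolling buffer
    .decode("pil")                     # decode image entries to PIL images
    .to_tuple("jpg", "cls")            # select the image and label of each sample
    .map_tuple(preprocess, lambda y: y)
)

# WebDataset datasets are IterableDatasets, so the standard DataLoader applies.
loader = torch.utils.data.DataLoader(dataset, batch_size=64, num_workers=4)
```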
Even on small problems and on your desktop, it can speed up I/O tenfold and simplifies data management and processing of large datasets. The rest of this blog post tells you how to get started with WebDataset and how it works. @@ -38,7 +39,7 @@ The WebDataset library is a complete solution for working with large datasets an ## Benefits -The use of sharded, sequentially readable formats is essential for very large datasets. In addition, it has benefits in many other environments. WebDataset provides a solution that scales well from small problems on a desktop machine to very large deep learning problems in clusters or in the cloud. The following table summarizes some of the benefits in different environments. +The use of sharded, sequentially readable formats is essential for very large datasets. In addition, it has benefits in many other environments. WebDataset provides a solution that scales well from small problems on a desktop machine to very large deep learning problems in clusters or in the cloud. The following table summarizes some of the benefits in different environments. {:.table.table-striped.table-bordered} | Environment | Benefits of WebDataset | @@ -122,7 +123,7 @@ for inputs, targets in loader: ``` This code is nearly identical to the file-based I/O pipeline found in the PyTorch Imagenet example: it creates a preprocessing/augmentation pipeline, instantiates a dataset using that pipeline and a data source location, and then constructs a DataLoader instance from the dataset. - + WebDataset uses a fluent API for a configuration that internally builds up a processing pipeline. Without any added processing stages, In this example, WebDataset is used with the PyTorch DataLoader class, which replicates DataSet instances across multiple threads and performs both parallel I/O and parallel data augmentation. WebDataset instances themselves just iterate through each training sample as a dictionary: diff --git a/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md b/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md index 106761d7cf43..6503e3ce4035 100644 --- a/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md +++ b/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'PyTorch 1.6 now includes Stochastic Weight Averaging' author: Pavel Izmailov, Andrew Gordon Wilson and Vincent Queneneville-Belair +image: /assets/images/bert2.png --- Do you use stochastic gradient descent (SGD) or Adam? Regardless of the procedure you use to train your neural network, you can likely achieve significantly better generalization at virtually no additional cost with a simple new technique now natively supported in PyTorch 1.6, Stochastic Weight Averaging (SWA) [1]. Even if you have already trained your model, it’s easy to realize the benefits of SWA by running SWA for a small number of epochs starting with a pre-trained model. [Again](https://twitter.com/MilesCranmer/status/1282140440892932096) and [again](https://twitter.com/leopd/status/1285969855062192129), researchers are discovering that SWA improves the performance of well-tuned models in a wide array of practical applications with little cost or effort! @@ -30,7 +31,7 @@ Previously, SWA was in PyTorch contrib. 
In PyTorch 1.6, we provide a new conveni At a high level, averaging SGD iterates dates back several decades in convex optimization [7, 8], where it is sometimes referred to as Polyak-Ruppert averaging, or averaged SGD. **But the details matter**. Averaged SGD is often used in conjunction with a decaying learning rate, and an exponential moving average (EMA), typically for convex optimization. In convex optimization, the focus has been on improved rates of convergence. In deep learning, this form of averaged SGD smooths the trajectory of SGD iterates but does not perform very differently. -By contrast, SWA uses an **equal average** of SGD iterates with a modified **cyclical or high constant learning rate** and exploits the flatness of training objectives [8] specific to **deep learning** for **improved generalization**. +By contrast, SWA uses an **equal average** of SGD iterates with a modified **cyclical or high constant learning rate** and exploits the flatness of training objectives [8] specific to **deep learning** for **improved generalization**. ## How does Stochastic Weight Averaging Work? @@ -48,9 +49,9 @@ While we focus on SGD for simplicity in the description above, SWA can be combin ## How to use SWA in PyTorch? -In `torch.optim.swa_utils` we implement all the SWA ingredients to make it convenient to use SWA with any model. In particular, we implement `AveragedModel` class for SWA models, `SWALR` learning rate scheduler, and `update_bn` utility function to update SWA batch normalization statistics at the end of training. +In `torch.optim.swa_utils` we implement all the SWA ingredients to make it convenient to use SWA with any model. In particular, we implement `AveragedModel` class for SWA models, `SWALR` learning rate scheduler, and `update_bn` utility function to update SWA batch normalization statistics at the end of training. -In the example below, `swa_model` is the SWA model that accumulates the averages of the weights. We train the model for a total of 300 epochs, and we switch to the SWA learning rate schedule and start to collect SWA averages of the parameters at epoch 160. +In the example below, `swa_model` is the SWA model that accumulates the averages of the weights. We train the model for a total of 300 epochs, and we switch to the SWA learning rate schedule and start to collect SWA averages of the parameters at epoch 160. ```python from torch.optim.swa_utils import AveragedModel, SWALR @@ -75,7 +76,7 @@ for epoch in range(100): # Update bn statistics for the swa_model at the end torch.optim.swa_utils.update_bn(loader, swa_model) -# Use swa_model to make predictions on test data +# Use swa_model to make predictions on test data preds = swa_model(test_input) ``` @@ -94,7 +95,7 @@ In practice, we find an equal average with the modified learning rate schedule i `SWALR` is a learning rate scheduler that anneals the learning rate to a fixed value, and then keeps it constant. For example, the following code creates a scheduler that linearly anneals the learning rate from its initial value to `0.05` in `5` epochs within each parameter group. 
```python -swa_scheduler = torch.optim.swa_utils.SWALR(optimizer, +swa_scheduler = torch.optim.swa_utils.SWALR(optimizer, anneal_strategy="linear", anneal_epochs=5, swa_lr=0.05) ``` @@ -114,7 +115,7 @@ for epoch in range(100): Finally, `update_bn` is a utility function that computes the batchnorm statistics for the SWA model on a given dataloader `loader`: ``` -torch.optim.swa_utils.update_bn(loader, swa_model) +torch.optim.swa_utils.update_bn(loader, swa_model) ``` `update_bn` applies the `swa_model` to every element in the dataloader and computes the activation statistics for each batch normalization layer in the model. @@ -145,9 +146,9 @@ We release a GitHub [repo](https://github.com/izmailovpavel/torch_swa_examples) {:.table.table-striped.table-bordered} - | | VGG-16 | ResNet-164 | WideResNet-28x10 | + | | VGG-16 | ResNet-164 | WideResNet-28x10 | | ------------- | ------------- | ------------- | ------------- | -| SGD | 72.8 ± 0.3 | 78.4 ± 0.3 | 81.0 ± 0.3 | +| SGD | 72.8 ± 0.3 | 78.4 ± 0.3 | 81.0 ± 0.3 | | SWA | 74.4 ± 0.3 | 79.8 ± 0.4 | 82.5 ± 0.2 | @@ -166,8 +167,8 @@ In another follow-up [paper](http://www.gatsby.ucl.ac.uk/~balaji/udl-camera-read {:.table.table-striped.table-bordered} - | Environment Name | A2C | A2C + SWA | -| ------------- | ------------- | ------------- | + | Environment Name | A2C | A2C + SWA | +| ------------- | ------------- | ------------- | | Breakout | 522 ± 34 | 703 ± 60 | | Qbert | 18777 ± 778 | 21272 ± 655 | | SpaceInvaders | 7727 ± 1121 | 21676 ± 8897 | @@ -183,7 +184,7 @@ We can filter through quantization noise by combining weights that have been rou
-**Figure 9**. *Quantizing a solution leads to a perturbation of the weights which has a greater effect on the quality of the sharp solution (left) compared to wide solution (right)*. +**Figure 9**. *Quantizing a solution leads to a perturbation of the weights which has a greater effect on the quality of the sharp solution (left) compared to wide solution (right)*.
@@ -204,7 +205,7 @@ SWA can be viewed as taking the first moment of SGD iterates with a modified lea
**Figure 6**. *SWAG posterior approximation and the loss surface for a ResNet-20 without skip-connections trained on CIFAR-10 in the subspace formed by the two largest eigenvalues of the SWAG covariance matrix. The shape of SWAG distribution is aligned with the posterior: the peaks of the two distributions coincide, and both distributions are wider in one direction than in the orthogonal direction. Visualization created in collaboration with* [Javier Ideami](https://losslandscape.com/). -Empirically, SWAG performs on par or better than popular alternatives including MC dropout, KFAC Laplace, and temperature scaling on uncertainty quantification, out-of-distribution detection, calibration and transfer learning in computer vision tasks. Code for SWAG is available [here](https://github.com/wjmaddox/swa_gaussian). +Empirically, SWAG performs on par or better than popular alternatives including MC dropout, KFAC Laplace, and temperature scaling on uncertainty quantification, out-of-distribution detection, calibration and transfer learning in computer vision tasks. Code for SWAG is available [here](https://github.com/wjmaddox/swa_gaussian).
@@ -214,7 +215,7 @@ Empirically, SWAG performs on par or better than popular alternatives including MultiSWAG [9] uses multiple independent SWAG models to form a mixture of Gaussians as an approximate posterior distribution. Different basins of attraction contain highly complementary explanations of the data. Accordingly, marginalizing over these multiple basins provides a significant boost in accuracy and uncertainty representation. MultiSWAG can be viewed as a generalization of deep ensembles, but with performance improvements. -Indeed, we see in Figure 8 that MultiSWAG entirely mitigates double descent -- more flexible models have monotonically improving performance -- and provides significantly improved generalization over SGD. For example, when the ResNet-18 has layers of width 20, Multi-SWAG achieves under 30% error whereas SGD achieves over 45%, more than a 15% gap! +Indeed, we see in Figure 8 that MultiSWAG entirely mitigates double descent -- more flexible models have monotonically improving performance -- and provides significantly improved generalization over SGD. For example, when the ResNet-18 has layers of width 20, Multi-SWAG achieves under 30% error whereas SGD achieves over 45%, more than a 15% gap!
@@ -227,18 +228,18 @@ Another [method](https://arxiv.org/abs/1907.07504), Subspace Inference, construc ## Try it Out! -One of the greatest open questions in deep learning is why SGD manages to find good solutions, given that the training objectives are highly multimodal, and there are many settings of parameters that achieve no training loss but poor generalization. By understanding geometric features such as flatness, which relate to generalization, we can begin to resolve these questions and build optimizers that provide even better generalization, and many other useful features, such as uncertainty representation. We have presented SWA, a simple drop-in replacement for standard optimizers such as SGD and Adam, which can in principle, benefit anyone training a deep neural network. SWA has been demonstrated to have a strong performance in several areas, including computer vision, semi-supervised learning, reinforcement learning, uncertainty representation, calibration, Bayesian model averaging, and low precision training. +One of the greatest open questions in deep learning is why SGD manages to find good solutions, given that the training objectives are highly multimodal, and there are many settings of parameters that achieve no training loss but poor generalization. By understanding geometric features such as flatness, which relate to generalization, we can begin to resolve these questions and build optimizers that provide even better generalization, and many other useful features, such as uncertainty representation. We have presented SWA, a simple drop-in replacement for standard optimizers such as SGD and Adam, which can in principle, benefit anyone training a deep neural network. SWA has been demonstrated to have a strong performance in several areas, including computer vision, semi-supervised learning, reinforcement learning, uncertainty representation, calibration, Bayesian model averaging, and low precision training. -We encourage you to try out SWA! SWA is now as easy as any standard training in PyTorch. And even if you have already trained your model, you can use SWA to significantly improve performance by running it for a small number of epochs from a pre-trained model. +We encourage you to try out SWA! SWA is now as easy as any standard training in PyTorch. And even if you have already trained your model, you can use SWA to significantly improve performance by running it for a small number of epochs from a pre-trained model. [1] Averaging Weights Leads to Wider Optima and Better Generalization; Pavel Izmailov, Dmitry Podoprikhin, Timur Garipov, Dmitry Vetrov, Andrew Gordon Wilson; Uncertainty in Artificial Intelligence (UAI), 2018. -[2] There Are Many Consistent Explanations of Unlabeled Data: Why You Should Average; Ben Athiwaratkun, Marc Finzi, Pavel Izmailov, Andrew Gordon Wilson; +[2] There Are Many Consistent Explanations of Unlabeled Data: Why You Should Average; Ben Athiwaratkun, Marc Finzi, Pavel Izmailov, Andrew Gordon Wilson; International Conference on Learning Representations (ICLR), 2019. -[3] Improving Stability in Deep Reinforcement Learning with Weight Averaging; Evgenii Nikishin, Pavel Izmailov, Ben Athiwaratkun, Dmitrii Podoprikhin, +[3] Improving Stability in Deep Reinforcement Learning with Weight Averaging; Evgenii Nikishin, Pavel Izmailov, Ben Athiwaratkun, Dmitrii Podoprikhin, Timur Garipov, Pavel Shvechikov, Dmitry Vetrov, Andrew Gordon Wilson; UAI 2018 Workshop: Uncertainty in Deep Learning, 2018. 
[4] A Simple Baseline for Bayesian Uncertainty in Deep Learning @@ -249,7 +250,7 @@ Pavel Izmailov, Wesley Maddox, Polina Kirichenko, Timur Garipov, Dmitry Vetrov, Uncertainty in Artificial Intelligence (UAI), 2019. [6] SWALP : Stochastic Weight Averaging in Low Precision Training -Guandao Yang, Tianyi Zhang, Polina Kirichenko, Junwen Bai, +Guandao Yang, Tianyi Zhang, Polina Kirichenko, Junwen Bai, Andrew Gordon Wilson, Christopher De Sa; International Conference on Machine Learning (ICML), 2019. [7] David Ruppert. Efficient estimations from a slowly convergent Robbins-Monro process; Technical report, Cornell University Operations Research and Industrial Engineering, 1988. @@ -257,7 +258,7 @@ Andrew Gordon Wilson, Christopher De Sa; International Conference on Machine Lea [8] Acceleration of stochastic approximation by averaging. Boris T Polyak and Anatoli B Juditsky; SIAM Journal on Control and Optimization, 30(4):838–855, 1992. [9] Loss Surfaces, Mode Connectivity, and Fast Ensembling of DNNs -Timur Garipov, Pavel Izmailov, Dmitrii Podoprikhin, Dmitry Vetrov, +Timur Garipov, Pavel Izmailov, Dmitrii Podoprikhin, Dmitry Vetrov, Andrew Gordon Wilson. Neural Information Processing Systems (NeurIPS), 2018. [10] Bayesian Deep Learning and a Probabilistic Perspective of Generalization diff --git a/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md b/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md index 2be782f18b47..e71ccc435661 100644 --- a/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md +++ b/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'PyTorch 1.4 released, domain libraries updated' author: Team PyTorch +image: /assets/images/bert2.png --- Today, we’re announcing the availability of PyTorch 1.4, along with updates to the PyTorch domain libraries. These releases build on top of the announcements from [NeurIPS 2019](https://pytorch.org/blog/pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community/), where we shared the availability of PyTorch Elastic, a new classification framework for image and video, and the addition of Preferred Networks to the PyTorch community. For those that attended the workshops at NeurIPS, the content can be found [here](https://research.fb.com/neurips-2019-expo-workshops/). 
@@ -43,7 +44,7 @@ To learn more about the APIs and the design of this feature, see the links below * [Distributed Autograd design doc](https://pytorch.org/docs/stable/notes/distributed_autograd.html) * [Remote Reference design doc](https://pytorch.org/docs/stable/notes/rref.html) -For the full tutorials, see the links below: +For the full tutorials, see the links below: * [A full RPC tutorial](https://pytorch.org/tutorials/intermediate/rpc_tutorial.html) * [Examples using model parallel training for reinforcement learning and with an LSTM](https://github.com/pytorch/examples/tree/master/distributed/rpc) diff --git a/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md b/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md index a23bdc353b4b..21b923f9e1d7 100644 --- a/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md +++ b/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'Introduction to Quantization on PyTorch' author: Raghuraman Krishnamoorthi, James Reed, Min Ni, Chris Gottbrath, and Seth Weidman +image: /assets/images/bert2.png --- It’s important to make efficient use of both server-side and on-device compute resources when developing machine learning applications. To support more efficient deployment on servers and edge devices, PyTorch added a support for model quantization using the familiar eager mode Python API. diff --git a/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md b/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md index e81d2f7da780..69cfa9843f19 100644 --- a/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md +++ b/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md @@ -2,6 +2,7 @@ layout: blog_detail title: 'PyTorch 1.5 released, new and updated APIs including C++ frontend API parity with Python' author: Team PyTorch +image: /assets/images/bert2.png --- diff --git a/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md b/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md index 69101b8abc09..904c03800a0d 100644 --- a/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md +++ b/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md @@ -2,10 +2,11 @@ layout: blog_detail title: 'PyTorch library updates including new model serving library ' author: Team PyTorch +image: /assets/images/bert2.png --- -Along with the PyTorch 1.5 release, we are announcing new libraries for high-performance PyTorch model serving and tight integration with TorchElastic and Kubernetes. Additionally, we are releasing updated packages for torch_xla (Google Cloud TPUs), torchaudio, torchvision, and torchtext. All of these new libraries and enhanced capabilities are available today and accompany all of the core features [released in PyTorch 1.5](https://pytorch.org/blog/pytorch-1-dot-5-released-with-new-and-updated-apis). +Along with the PyTorch 1.5 release, we are announcing new libraries for high-performance PyTorch model serving and tight integration with TorchElastic and Kubernetes. Additionally, we are releasing updated packages for torch_xla (Google Cloud TPUs), torchaudio, torchvision, and torchtext. All of these new libraries and enhanced capabilities are available today and accompany all of the core features [released in PyTorch 1.5](https://pytorch.org/blog/pytorch-1-dot-5-released-with-new-and-updated-apis). 
## TorchServe (Experimental) @@ -35,7 +36,7 @@ To learn more see the [TorchElastic repo](http://pytorch.org/elastic/0.2.0rc0/ku ## torch_xla 1.5 now available -[torch_xla](http://pytorch.org/xla/) is a Python package that uses the [XLA linear algebra compiler](https://www.tensorflow.org/xla) to accelerate the [PyTorch deep learning framework](https://pytorch.org/) on [Cloud TPUs](https://cloud.google.com/tpu/) and [Cloud TPU Pods](https://cloud.google.com/tpu/docs/tutorials/pytorch-pod). torch_xla aims to give PyTorch users the ability to do everything they can do on GPUs on Cloud TPUs as well while minimizing changes to the user experience. The project began with a conversation at NeurIPS 2017 and gathered momentum in 2018 when teams from Facebook and Google came together to create a proof of concept. We announced this collaboration at PTDC 2018 and made the PyTorch/XLA integration broadly available at PTDC 2019. The project already has 28 contributors, nearly 2k commits, and a repo that has been forked more than 100 times. +[torch_xla](http://pytorch.org/xla/) is a Python package that uses the [XLA linear algebra compiler](https://www.tensorflow.org/xla) to accelerate the [PyTorch deep learning framework](https://pytorch.org/) on [Cloud TPUs](https://cloud.google.com/tpu/) and [Cloud TPU Pods](https://cloud.google.com/tpu/docs/tutorials/pytorch-pod). torch_xla aims to give PyTorch users the ability to do everything they can do on GPUs on Cloud TPUs as well while minimizing changes to the user experience. The project began with a conversation at NeurIPS 2017 and gathered momentum in 2018 when teams from Facebook and Google came together to create a proof of concept. We announced this collaboration at PTDC 2018 and made the PyTorch/XLA integration broadly available at PTDC 2019. The project already has 28 contributors, nearly 2k commits, and a repo that has been forked more than 100 times. This release of [torch_xla](http://pytorch.org/xla/) is aligned and tested with PyTorch 1.5 to reduce friction for developers and to provide a stable and mature PyTorch/XLA stack for training models using Cloud TPU hardware. You can [try it for free](https://medium.com/pytorch/get-started-with-pytorch-cloud-tpus-and-colab-a24757b8f7fc) in your browser on an 8-core Cloud TPU device with [Google Colab](https://colab.research.google.com/), and you can use it at a much larger scaleon [Google Cloud](https://cloud.google.com/gcp). @@ -48,9 +49,9 @@ torchaudio, torchvision, and torchtext complement PyTorch with common datasets, ### torchaudio 0.5 The torchaudio 0.5 release includes new transforms, functionals, and datasets. Highlights for the release include: -* Added the Griffin-Lim functional and transform, `InverseMelScale` and `Vol` transforms, and `DB_to_amplitude`. +* Added the Griffin-Lim functional and transform, `InverseMelScale` and `Vol` transforms, and `DB_to_amplitude`. * Added support for `allpass`, `fade`, `bandpass`, `bandreject`, `band`, `treble`, `deemph`, and `riaa` filters and transformations. -* New datasets added including `LJSpeech` and `SpeechCommands` datasets. +* New datasets added including `LJSpeech` and `SpeechCommands` datasets. See the release full notes [here](https://github.com/pytorch/audio/releases) and full docs can be found [here](https://pytorch.org/audio/). @@ -58,7 +59,7 @@ See the release full notes [here](https://github.com/pytorch/audio/releases) and The torchvision 0.6 release includes updates to datasets, models and a significant number of bug fixes. 
Highlights include: * Faster R-CNN now supports negative samples which allows the feeding of images without annotations at training time. -* Added `aligned` flag to `RoIAlign` to match Detectron2. +* Added `aligned` flag to `RoIAlign` to match Detectron2. * Refactored abstractions for C++ video decoder See the release full notes [here](https://github.com/pytorch/vision/releases) and full docs can be found [here](https://pytorch.org/docs/stable/torchvision/index.html). @@ -68,9 +69,9 @@ The torchtext 0.6 release includes a number of bug fixes and improvements to doc * Fixed an issue related to the SentencePiece dependency in conda package. * Added support for the experimental IMDB dataset to allow a custom vocab. -* A number of documentation updates including adding a code of conduct and a deduplication of the docs on the torchtext site. +* A number of documentation updates including adding a code of conduct and a deduplication of the docs on the torchtext site. -Your feedback and discussions on the experimental datasets API are welcomed. You can send them to [issue #664](https://github.com/pytorch/text/issues/664). We would also like to highlight the pull request [here](https://github.com/pytorch/text/pull/701) where the latest dataset abstraction is applied to the text classification datasets. The feedback can be beneficial to finalizing this abstraction. +Your feedback and discussions on the experimental datasets API are welcomed. You can send them to [issue #664](https://github.com/pytorch/text/issues/664). We would also like to highlight the pull request [here](https://github.com/pytorch/text/pull/701) where the latest dataset abstraction is applied to the text classification datasets. The feedback can be beneficial to finalizing this abstraction. See the release full notes [here](https://github.com/pytorch/text/releases) and full docs can be found [here](https://pytorch.org/text/). diff --git a/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md b/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md index 1f0f8a9fc6d5..7fc6210a4b77 100644 --- a/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md +++ b/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md @@ -2,14 +2,15 @@ layout: blog_detail title: 'Updates & Improvements to PyTorch Tutorials' author: Team PyTorch +image: /assets/images/bert2.png --- -PyTorch.org provides researchers and developers with documentation, installation instructions, latest news, community projects, tutorials, and more. Today, we are introducing usability and content improvements including tutorials in additional categories, a new recipe format for quickly referencing common topics, sorting using tags, and an updated homepage. +PyTorch.org provides researchers and developers with documentation, installation instructions, latest news, community projects, tutorials, and more. Today, we are introducing usability and content improvements including tutorials in additional categories, a new recipe format for quickly referencing common topics, sorting using tags, and an updated homepage. -Let’s take a look at them in detail. +Let’s take a look at them in detail. ## TUTORIALS HOME PAGE UPDATE -The tutorials home page now provides clear actions that developers can take. For new PyTorch users, there is an easy-to-discover button to take them directly to “A 60 Minute Blitz”. Right next to it, there is a button to view all recipes which are designed to teach specific features quickly with examples. 
+The tutorials home page now provides clear actions that developers can take. For new PyTorch users, there is an easy-to-discover button to take them directly to “A 60 Minute Blitz”. Right next to it, there is a button to view all recipes which are designed to teach specific features quickly with examples.
@@ -26,7 +27,7 @@ The following additional resources can also be found at the bottom of the Tutori * [PyTorch Examples](https://github.com/pytorch/examples) * [Tutorial on GitHub](https://github.com/pytorch/tutorials) -## PYTORCH RECIPES +## PYTORCH RECIPES Recipes are new bite-sized, actionable examples designed to teach researchers and developers how to use specific PyTorch features. Some notable new recipes include: * [Loading Data in PyTorch](https://pytorch.org/tutorials/recipes/recipes/loading_data_recipe.html) * [Model Interpretability Using Captum](https://pytorch.org/tutorials/recipes/recipes/Captum_Recipe.html) @@ -35,7 +36,7 @@ Recipes are new bite-sized, actionable examples designed to teach researchers an View the full recipes [here](http://pytorch.org/tutorials/recipes/recipes_index.html). ## LEARNING PYTORCH -This section includes tutorials designed for users new to PyTorch. Based on community feedback, we have made updates to the current [Deep Learning with PyTorch: A 60 Minute Blitz](https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html) tutorial, one of our most popular tutorials for beginners. Upon completion, one can understand what PyTorch and neural networks are, and be able to build and train a simple image classification network. Updates include adding explanations to clarify output meanings and linking back to where users can read more in the docs, cleaning up confusing syntax errors, and reconstructing and explaining new concepts for easier readability. +This section includes tutorials designed for users new to PyTorch. Based on community feedback, we have made updates to the current [Deep Learning with PyTorch: A 60 Minute Blitz](https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html) tutorial, one of our most popular tutorials for beginners. Upon completion, one can understand what PyTorch and neural networks are, and be able to build and train a simple image classification network. Updates include adding explanations to clarify output meanings and linking back to where users can read more in the docs, cleaning up confusing syntax errors, and reconstructing and explaining new concepts for easier readability. ## DEPLOYING MODELS IN PRODUCTION This section includes tutorials for developers looking to take their PyTorch models to production. The tutorials include: @@ -45,7 +46,7 @@ This section includes tutorials for developers looking to take their PyTorch mod * [Exploring a Model from PyTorch to ONNX and Running it using ONNX Runtime](https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html) ## FRONTEND APIS -PyTorch provides a number of frontend API features that can help developers to code, debug, and validate their models more efficiently. This section includes tutorials that teach what these features are and how to use them. Some tutorials to highlight: +PyTorch provides a number of frontend API features that can help developers to code, debug, and validate their models more efficiently. This section includes tutorials that teach what these features are and how to use them. 
Some tutorials to highlight: * [Introduction to Named Tensors in PyTorch](https://pytorch.org/tutorials/intermediate/named_tensor_tutorial.html) * [Using the PyTorch C++ Frontend](https://pytorch.org/tutorials/advanced/cpp_frontend.html) * [Extending TorchScript with Custom C++ Operators](https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html) @@ -59,7 +60,7 @@ Deep learning models often consume large amounts of memory, power, and compute d * [Static Quantization with Eager Mode in PyTorch](https://pytorch.org/tutorials/advanced/static_quantization_tutorial.html) ## PARALLEL AND DISTRIBUTED TRAINING -PyTorch provides features that can accelerate performance in research and production such as native support for asynchronous execution of collective operations and peer-to-peer communication that is accessible from Python and C++. This section includes tutorials on parallel and distributed training: +PyTorch provides features that can accelerate performance in research and production such as native support for asynchronous execution of collective operations and peer-to-peer communication that is accessible from Python and C++. This section includes tutorials on parallel and distributed training: * [Single-Machine Model Parallel Best Practices](https://pytorch.org/tutorials/intermediate/model_parallel_tutorial.html) * [Getting started with Distributed Data Parallel](https://pytorch.org/tutorials/intermediate/ddp_tutorial.html) * [Getting started with Distributed RPC Framework](https://pytorch.org/tutorials/intermediate/rpc_tutorial.html) diff --git a/_posts/2020-7-28-pytorch-1.6-released.md b/_posts/2020-7-28-pytorch-1.6-released.md index d1a18284dc69..5150ea8b854b 100644 --- a/_posts/2020-7-28-pytorch-1.6-released.md +++ b/_posts/2020-7-28-pytorch-1.6-released.md @@ -2,24 +2,25 @@ layout: blog_detail title: 'PyTorch 1.6 released w/ Native AMP Support, Microsoft joins as maintainers for Windows' author: Team PyTorch +image: /assets/images/bert2.png --- Today, we’re announcing the availability of PyTorch 1.6, along with updated domain libraries. We are also excited to announce the team at [Microsoft is now maintaining Windows builds and binaries](https://pytorch.org/blog/microsoft-becomes-maintainer-of-the-windows-version-of-pytorch) and will also be supporting the community on GitHub as well as the PyTorch Windows discussion forums. -The PyTorch 1.6 release includes a number of new APIs, tools for performance improvement and profiling, as well as major updates to both distributed data parallel (DDP) and remote procedure call (RPC) based distributed training. -A few of the highlights include: +The PyTorch 1.6 release includes a number of new APIs, tools for performance improvement and profiling, as well as major updates to both distributed data parallel (DDP) and remote procedure call (RPC) based distributed training. +A few of the highlights include: -1. Automatic mixed precision (AMP) training is now natively supported and a stable feature (See [here](https://pytorch.org/blog/accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision/) for more details) - thanks for NVIDIA’s contributions; -2. Native TensorPipe support now added for tensor-aware, point-to-point communication primitives built specifically for machine learning; +1. 
Automatic mixed precision (AMP) training is now natively supported and a stable feature (See [here](https://pytorch.org/blog/accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision/) for more details) - thanks for NVIDIA’s contributions; +2. Native TensorPipe support now added for tensor-aware, point-to-point communication primitives built specifically for machine learning; 3. Added support for complex tensors to the frontend API surface; 4. New profiling tools providing tensor-level memory consumption information; 5. Numerous improvements and new features for both distributed data parallel (DDP) training and the remote procedural call (RPC) packages. -Additionally, from this release onward, features will be classified as Stable, Beta and Prototype. Prototype features are not included as part of the binary distribution and are instead available through either building from source, using nightlies or via compiler flag. You can learn more about what this change means in the post [here](https://pytorch.org/blog/pytorch-feature-classification-changes/). You can also find the full release notes [here](https://github.com/pytorch/pytorch/releases). +Additionally, from this release onward, features will be classified as Stable, Beta and Prototype. Prototype features are not included as part of the binary distribution and are instead available through either building from source, using nightlies or via compiler flag. You can learn more about what this change means in the post [here](https://pytorch.org/blog/pytorch-feature-classification-changes/). You can also find the full release notes [here](https://github.com/pytorch/pytorch/releases). # Performance & Profiling -## [Stable] Automatic Mixed Precision (AMP) Training +## [Stable] Automatic Mixed Precision (AMP) Training AMP allows users to easily enable automatic mixed precision training enabling higher performance and memory savings of up to 50% on Tensor Core GPUs. Using the natively supported `torch.cuda.amp` API, AMP provides convenience methods for mixed precision, where some operations use the `torch.float32 (float)` datatype and other operations use `torch.float16 (half)`. Some ops, like linear layers and convolutions, are much faster in `float16`. Other ops, like reductions, often require the dynamic range of `float32`. Mixed precision tries to match each op to its appropriate datatype. @@ -27,7 +28,7 @@ AMP allows users to easily enable automatic mixed precision training enabling hi * Documentation ([Link](https://pytorch.org/docs/stable/amp.html)) * Usage examples ([Link](https://pytorch.org/docs/stable/notes/amp_examples.html)) -## [Beta] Fork/Join Parallelism +## [Beta] Fork/Join Parallelism This release adds support for a language-level construct as well as runtime support for coarse-grained parallelism in TorchScript code. This support is useful for situations such as running models in an ensemble in parallel, or running bidirectional components of recurrent nets in parallel, and allows the ability to unlock the computational power of parallel architectures (e.g. many-core CPUs) for task level parallelism. @@ -48,10 +49,10 @@ def example(x): print(example(torch.ones([]))) ``` - + * Documentation ([Link](https://pytorch.org/docs/stable/jit.html)) -## [Beta] Memory Profiler +## [Beta] Memory Profiler The `torch.autograd.profiler` API now includes a memory profiler that lets you inspect the tensor memory cost of different operators inside your CPU and GPU models. 
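A minimal sketch of how the new `profile_memory` flag is used, with a torchvision ResNet-18 standing in as an arbitrary workload rather than the example from the release notes:

```python
import torch
import torchvision.models as models
from torch.autograd import profiler

model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)

# profile_memory=True records how much memory each operator allocates.
with profiler.profile(profile_memory=True, record_shapes=True) as prof:
    model(inputs)

# Rank operators by the CPU memory they themselves allocated.
print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))
```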
@@ -83,7 +84,7 @@ print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10)) * PR ([Link](https://github.com/pytorch/pytorch/pull/37775)) * Documentation ([Link](https://pytorch.org/docs/stable/autograd.html#profiler)) -# Distributed Training & RPC +# Distributed Training & RPC ## [Beta] TensorPipe backend for RPC @@ -103,11 +104,11 @@ torch.distributed.rpc.rpc_sync(...) * Design doc ([Link](https://github.com/pytorch/pytorch/issues/35251)) * Documentation ([Link](https://pytorch.org/docs/stable/rpc/index.html)) -## [Beta] DDP+RPC +## [Beta] DDP+RPC PyTorch Distributed supports two powerful paradigms: DDP for full sync data parallel training of models and the RPC framework which allows for distributed model parallelism. Previously, these two features worked independently and users couldn’t mix and match these to try out hybrid parallelism paradigms. -Starting in PyTorch 1.6, we’ve enabled DDP and RPC to work together seamlessly so that users can combine these two techniques to achieve both data parallelism and model parallelism. An example is where users would like to place large embedding tables on parameter servers and use the RPC framework for embedding lookups, but store smaller dense parameters on trainers and use DDP to synchronize the dense parameters. Below is a simple code snippet. +Starting in PyTorch 1.6, we’ve enabled DDP and RPC to work together seamlessly so that users can combine these two techniques to achieve both data parallelism and model parallelism. An example is where users would like to place large embedding tables on parameter servers and use the RPC framework for embedding lookups, but store smaller dense parameters on trainers and use DDP to synchronize the dense parameters. Below is a simple code snippet. ```python // On each trainer @@ -139,11 +140,11 @@ def async_add_chained(to, x, y, z): ) ret = rpc.rpc_sync( - "worker1", - async_add_chained, + "worker1", + async_add_chained, args=("worker2", torch.ones(2), 1, 1) ) - + print(ret) # prints tensor([3., 3.]) ``` @@ -153,15 +154,15 @@ print(ret) # prints tensor([3., 3.]) # Frontend API Updates -## [Beta] Complex Numbers +## [Beta] Complex Numbers -The PyTorch 1.6 release brings beta level support for complex tensors including torch.complex64 and torch.complex128 dtypes. A complex number is a number that can be expressed in the form a + bj, where a and b are real numbers, and j is a solution of the equation x^2 = −1. Complex numbers frequently occur in mathematics and engineering, especially in signal processing and the area of complex neural networks is an active area of research. The beta release of complex tensors will support common PyTorch and complex tensor functionality, plus functions needed by Torchaudio, ESPnet and others. While this is an early version of this feature, and we expect it to improve over time, the overall goal is provide a NumPy compatible user experience that leverages PyTorch’s ability to run on accelerators and work with autograd to better support the scientific community. +The PyTorch 1.6 release brings beta level support for complex tensors including torch.complex64 and torch.complex128 dtypes. A complex number is a number that can be expressed in the form a + bj, where a and b are real numbers, and j is a solution of the equation x^2 = −1. Complex numbers frequently occur in mathematics and engineering, especially in signal processing and the area of complex neural networks is an active area of research. 
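As a tiny illustration (not an excerpt from the release notes), complex tensors can be constructed from Python complex literals and combined with ordinary elementwise arithmetic:

```python
import torch

# Construct complex tensors from Python complex literals.
z = torch.tensor([1 + 2j, 3 - 1j], dtype=torch.complex64)
w = torch.tensor([2 + 0j, 1 + 1j], dtype=torch.complex64)

print(z.dtype)  # torch.complex64
print(z + w)    # elementwise complex addition
print(z * w)    # elementwise complex multiplication
```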
The beta release of complex tensors will support common PyTorch and complex tensor functionality, plus functions needed by Torchaudio, ESPnet and others. While this is an early version of this feature, and we expect it to improve over time, the overall goal is provide a NumPy compatible user experience that leverages PyTorch’s ability to run on accelerators and work with autograd to better support the scientific community. # Updated Domain Libraries -## torchvision 0.7 +## torchvision 0.7 -torchvision 0.7 introduces two new pretrained semantic segmentation models, [FCN ResNet50](https://arxiv.org/abs/1411.4038) and [DeepLabV3 ResNet50](https://arxiv.org/abs/1706.05587), both trained on COCO and using smaller memory footprints than the ResNet101 backbone. We also introduced support for AMP (Automatic Mixed Precision) autocasting for torchvision models and operators, which automatically selects the floating point precision for different GPU operations to improve performance while maintaining accuracy. +torchvision 0.7 introduces two new pretrained semantic segmentation models, [FCN ResNet50](https://arxiv.org/abs/1411.4038) and [DeepLabV3 ResNet50](https://arxiv.org/abs/1706.05587), both trained on COCO and using smaller memory footprints than the ResNet101 backbone. We also introduced support for AMP (Automatic Mixed Precision) autocasting for torchvision models and operators, which automatically selects the floating point precision for different GPU operations to improve performance while maintaining accuracy. * Release notes ([Link](https://github.com/pytorch/vision/releases)) @@ -178,10 +179,10 @@ torchaudio now officially supports Windows. This release also introduces a new m The Global PyTorch Summer Hackathon is back! This year, teams can compete in three categories virtually: 1. **PyTorch Developer Tools:** Tools or libraries designed to improve productivity and efficiency of PyTorch for researchers and developers - 2. **Web/Mobile Applications powered by PyTorch:** Applications with web/mobile interfaces and/or embedded devices powered by PyTorch + 2. **Web/Mobile Applications powered by PyTorch:** Applications with web/mobile interfaces and/or embedded devices powered by PyTorch 3. **PyTorch Responsible AI Development Tools:** Tools, libraries, or web/mobile apps for responsible AI development -This is a great opportunity to connect with the community and practice your machine learning skills. +This is a great opportunity to connect with the community and practice your machine learning skills. * [Join the hackathon](http://pytorch2020.devpost.com/) * [Watch educational videos](https://www.youtube.com/pytorch) @@ -189,11 +190,11 @@ This is a great opportunity to connect with the community and practice your mach ## LPCV Challenge -The [2020 CVPR Low-Power Vision Challenge (LPCV) - Online Track for UAV video](https://lpcv.ai/2020CVPR/video-track) submission deadline is coming up shortly. You have until July 31, 2020 to build a system that can discover and recognize characters in video captured by an unmanned aerial vehicle (UAV) accurately using PyTorch and Raspberry Pi 3B+. +The [2020 CVPR Low-Power Vision Challenge (LPCV) - Online Track for UAV video](https://lpcv.ai/2020CVPR/video-track) submission deadline is coming up shortly. You have until July 31, 2020 to build a system that can discover and recognize characters in video captured by an unmanned aerial vehicle (UAV) accurately using PyTorch and Raspberry Pi 3B+. 
## Prototype Features -To reiterate, Prototype features in PyTorch are early features that we are looking to gather feedback on, gauge the usefulness of and improve ahead of graduating them to Beta or Stable. The following features are not part of the PyTorch 1.6 release and instead are available in nightlies with separate docs/tutorials to help facilitate early usage and feedback. +To reiterate, Prototype features in PyTorch are early features that we are looking to gather feedback on, gauge the usefulness of and improve ahead of graduating them to Beta or Stable. The following features are not part of the PyTorch 1.6 release and instead are available in nightlies with separate docs/tutorials to help facilitate early usage and feedback. #### Distributed RPC/Profiler Allow users to profile training jobs that use `torch.distributed.rpc` using the autograd profiler, and remotely invoke the profiler in order to collect profiling information across different nodes. The RFC can be found [here](https://github.com/pytorch/pytorch/issues/39675) and a short recipe on how to use this feature can be found [here](https://github.com/pytorch/tutorials/tree/master/prototype_source). diff --git a/_sass/blog.scss b/_sass/blog.scss index 7a3a1ffa61e1..ac172115134e 100644 --- a/_sass/blog.scss +++ b/_sass/blog.scss @@ -62,7 +62,7 @@ } @include desktop { margin-top: 380px + $desktop_header_height; - .row.blog-index + /*.row.blog-index [class*="col-"]:not(:first-child):not(:last-child):not(:nth-child(3n)) { padding-right: rem(35px); padding-left: rem(35px); @@ -74,7 +74,7 @@ .row.blog-index [class*="col-"]:nth-child(3n + 1) { padding-right: rem(35px); - } + }*/ .col-md-4 { margin-bottom: rem(23px); @@ -280,3 +280,17 @@ twitterwidget { margin-bottom: rem(18px) !important; } +.blog .pagination { + .page { + border: 1px solid #dee2e6; + padding: 0.5rem 0.75rem; + } + + .active .page { + background-color: #dee2e6; + } +} + +.blog .blog-img { + border: 1px solid $dark_grey; +} diff --git a/blog/index.html b/blog/index.html deleted file mode 100644 index 1a3505711aac..000000000000 --- a/blog/index.html +++ /dev/null @@ -1,32 +0,0 @@ ---- -layout: blog -title: Blog -permalink: /blog/all-posts -body-class: blog ---- - -{% assign posts = site.posts %} - -{% include blog_jumbotron.html posts=posts %} - -
-
-
-
- {% for post in posts %} -
-

{{ post.date | date: '%B %d, %Y' }}

-

- {{ post.title }} -

-

{{ post.excerpt | remove: '&lt;p&gt;' | remove: '&lt;/p&gt;' | truncate: 500}}

- -
- {% endfor %} -
-
-
-
- - - diff --git a/blog/landing-page.html b/blog/landing-page.html index 572fdc94e746..718d3e045c7c 100644 --- a/blog/landing-page.html +++ b/blog/landing-page.html @@ -18,14 +18,14 @@
{% for post in posts %} -
-

{{ post.date | date: '%B %d, %Y' }}

-

- {{ post.title }} -

-

{{ post.excerpt | remove: '&lt;p&gt;' | remove: '&lt;/p&gt;' | truncate: 500}}

- -
+
+

{{ post.date | date: '%B %d, %Y' }}

+

+ {{ post.title }} +

+

{{ post.excerpt | remove: '&lt;p&gt;' | remove: '&lt;/p&gt;' | truncate: 500}}

+ +
{% endfor %}
From d798df28b6d97e53cc2ac394dde7714b7bfb9f47 Mon Sep 17 00:00:00 2001 From: Pat Mellon Date: Thu, 27 Aug 2020 11:24:46 -0400 Subject: [PATCH 04/10] update --- blog/all-posts.html | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 blog/all-posts.html diff --git a/blog/all-posts.html b/blog/all-posts.html new file mode 100644 index 000000000000..36048d2667b4 --- /dev/null +++ b/blog/all-posts.html @@ -0,0 +1,38 @@ +--- +layout: blog +title: Blog +permalink: /blog/all-posts +body-class: blog +--- + +{% assign posts = site.posts %} + +{% include blog_jumbotron.html posts=posts %} + +
+
+
+
+ +
+ {% for post in posts %} +
+ +

+ {{ post.title }} +

+

{{ post.excerpt | remove: '&lt;p&gt;' | remove: '&lt;/p&gt;' | truncate: 500}}

+

{{ post.date | date: '%B %d, %Y' }}

+
+ {% endfor %} +
+ + + +
+
+
+
+ + + From 26aa53839c017135d56178c75a6966f81cb0b1f4 Mon Sep 17 00:00:00 2001 From: Pat Mellon Date: Thu, 27 Aug 2020 16:21:39 -0400 Subject: [PATCH 05/10] wip --- _posts/2017-5-11-a-tour-of-pytorch-internals-1.md | 1 + _posts/2017-6-27-a-tour-of-pytorch-internals-2.md | 1 + _posts/2018-01-19-a-year-in.md | 1 + _posts/2018-03-5-tensor-comprehensions.md | 1 + _posts/2018-04-22-pytorch-0_4_0-migration-guide.md | 1 + _posts/2018-05-2-the-road-to-1_0.md | 1 + _posts/2019-05-08-model-serving-in-pyorch.md | 1 + _posts/2019-05-1-pytorch-adds-new-dev-tools.md | 1 + ...-06-10-towards-reproducible-research-with-pytorch-hub.md | 1 + _posts/2019-07-18-pytorch-ecosystem.md | 1 + _posts/2019-07-23-mapillary-research.md | 1 + _posts/2019-08-08-pytorch-1.2-and-domain-api-release.md | 1 + ...-3-adds-mobile-privacy-quantization-and-named-tensors.md | 1 + ...h-launch-fellowship-funding-for-privacy-preserving-ml.md | 1 + ...ibraries-welcomes-preferred-networks-to-its-community.md | 1 + _posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md | 1 + _posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md | 1 + _posts/2019-5-22-torchvision03.md | 1 + ...on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md | 1 + ...-becomes-maintainer-of-the-windows-version-of-pytorch.md | 1 + _posts/2020-07-28-pytorch-feature-classification-changes.md | 1 + ...ch-io-library-for-large-datasets-many-files-many-gpus.md | 1 + ...-pytorch-1.6-now-includes-stochastic-weight-averaging.md | 5 ++++- ...pytorch-1-dot-4-released-and-domain-libraries-updated.md | 1 + _posts/2020-3-26-introduction-to-quantization-on-pytorch.md | 1 + ...21-pytorch-1-dot-5-released-with-new-and-updated-apis.md | 1 + ...-21-pytorch-library-updates-new-model-serving-library.md | 1 + .../2020-5-5-updates-improvements-to-pytorch-tutorials.md | 1 + _posts/2020-7-28-pytorch-1.6-released.md | 1 + blog/all-posts.html | 6 +++--- blog/landing-page.html | 3 +-- 31 files changed, 36 insertions(+), 6 deletions(-) diff --git a/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md b/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md index 2d43f20b5bdf..3ebc97c3262e 100644 --- a/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md +++ b/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md @@ -5,6 +5,7 @@ author: "Trevor Killeen" date: 2017-05-11 12:00:00 -0500 redirect_from: /2017/05/11/Internals.html image: /assets/images/bert2.png +tags: [one] --- The fundamental unit in PyTorch is the Tensor. This post will serve as an overview for how we implement Tensors in PyTorch, such that the user can interact with it from the Python shell. In particular, we want to answer four main questions: diff --git a/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md b/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md index 40ea89694fbe..8452d02ff7a9 100644 --- a/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md +++ b/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md @@ -5,6 +5,7 @@ author: "Trevor Killeen" date: 2017-06-27 12:00:00 -0500 redirect_from: /2017/06/27/Internals2.html image: /assets/images/bert2.png +tags: [one] --- In the first [post]({{ site.baseurl }}{% link _posts/2017-5-11-a-tour-of-pytorch-internals-1.md %}) I explained how we generate a `torch.Tensor` object that you can use in your Python interpreter. Next, I will explore the build system for PyTorch. 
The PyTorch codebase has a variety of components: diff --git a/_posts/2018-01-19-a-year-in.md b/_posts/2018-01-19-a-year-in.md index e2fa460b2a20..25d213842698 100644 --- a/_posts/2018-01-19-a-year-in.md +++ b/_posts/2018-01-19-a-year-in.md @@ -5,6 +5,7 @@ author: "The PyTorch Team" date: 2018-01-19 12:00:00 -0500 redirect_from: /2018/01/19/a-year-in.html image: /assets/images/bert2.png +tags: [one] --- Today marks 1 year since PyTorch was released publicly. It's been a wild ride — our quest to build a flexible deep learning research platform. Over the last year, we've seen an amazing community of people using, contributing to and evangelizing PyTorch — thank you for the love. diff --git a/_posts/2018-03-5-tensor-comprehensions.md b/_posts/2018-03-5-tensor-comprehensions.md index a3fb12e28a27..5b2391731581 100644 --- a/_posts/2018-03-5-tensor-comprehensions.md +++ b/_posts/2018-03-5-tensor-comprehensions.md @@ -4,6 +4,7 @@ title: 'Tensor Comprehensions in PyTorch' author: Priya Goyal (FAIR), Nicolas Vasilache (FAIR), Oleksandr Zinenko (Inria & DI ENS), Theodoros Theodoridis (ETH Zürich), Zachary DeVito (FAIR), William S. Moses (MIT CSAIL), Sven Verdoolaege (FAIR), Andrew Adams (FAIR), Albert Cohen (Inria & DI ENS & FAIR) redirect_from: /2018/03/05/tensor-comprehensions.html image: /assets/images/bert2.png +tags: [two] --- Tensor Comprehensions (TC) is a tool that lowers the barrier for writing high-performance code. It generates GPU code from a simple high-level language and autotunes the code for specific input sizes. diff --git a/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md b/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md index 2e9a7421f190..0934bc62758b 100644 --- a/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md +++ b/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'PyTorch 0.4.0 Migration Guide' redirect_from: /2018/04/22/0_4_0-migration-guide.html image: /assets/images/bert2.png +tags: [two] --- Welcome to the migration guide for PyTorch 0.4.0. In this release we introduced [many exciting new features and critical bug fixes](https://github.com/pytorch/pytorch/releases/tag/v0.4.0), with the goal of providing users a better and cleaner interface. In this guide, we will cover the most important changes in migrating existing code from previous versions: diff --git a/_posts/2018-05-2-the-road-to-1_0.md b/_posts/2018-05-2-the-road-to-1_0.md index 61d6edfd151f..f5640bcefb5e 100644 --- a/_posts/2018-05-2-the-road-to-1_0.md +++ b/_posts/2018-05-2-the-road-to-1_0.md @@ -4,6 +4,7 @@ title: 'The road to 1.0: production ready PyTorch' author: The PyTorch Team redirect_from: /2018/05/02/road-to-1.0.html image: /assets/images/bert2.png +tags: [two] --- We would like to give you a preview of the roadmap for PyTorch 1.0 , the next release of PyTorch. Over the last year, we've had 0.2, 0.3 and 0.4 transform PyTorch from a [Torch+Chainer]-like interface into something cleaner, adding double-backwards, numpy-like functions, advanced indexing and removing Variable boilerplate. At this time, we're confident that the API is in a reasonable and stable state to confidently release a 1.0. 
diff --git a/_posts/2019-05-08-model-serving-in-pyorch.md b/_posts/2019-05-08-model-serving-in-pyorch.md index 5a091b12b72c..264bbccfe2dd 100644 --- a/_posts/2019-05-08-model-serving-in-pyorch.md +++ b/_posts/2019-05-08-model-serving-in-pyorch.md @@ -4,6 +4,7 @@ title: 'Model Serving in PyTorch' author: Jeff Smith redirect_from: /2019/05/08/model-serving-in-pyorch.html image: /assets/images/bert2.png +tags: [three] --- PyTorch has seen a lot of adoption in research, but people can get confused about how well PyTorch models can be taken into production. This blog post is meant to clear up any confusion people might have about the road to production in PyTorch. diff --git a/_posts/2019-05-1-pytorch-adds-new-dev-tools.md b/_posts/2019-05-1-pytorch-adds-new-dev-tools.md index 0588763c4814..0db155bc65f3 100644 --- a/_posts/2019-05-1-pytorch-adds-new-dev-tools.md +++ b/_posts/2019-05-1-pytorch-adds-new-dev-tools.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'PyTorch adds new dev tools as it hits production scale' author: The PyTorch Team image: /assets/images/bert2.png +tags: [three] --- _This is a partial re-post of the original blog post on the Facebook AI Blog. The full post can be [viewed here](https://ai.facebook.com/blog/pytorch-adds-new-dev-tools-as-it-hits-production-scale/)_ diff --git a/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md b/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md index e7e74f430db7..510f98af3835 100644 --- a/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md +++ b/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md @@ -4,6 +4,7 @@ title: 'Towards Reproducible Research with PyTorch Hub' author: Team PyTorch redirect_from: /2019/06/10/pytorch_hub.html image: /assets/images/bert2.png +tags: [three] --- Reproducibility is an essential requirement for many fields of research including those based on machine learning techniques. However, many machine learning publications are either not reproducible or are difficult to reproduce. With the continued growth in the number of research publications, including tens of thousands of papers now hosted on arXiv and submissions to conferences at an all time high, research reproducibility is more important than ever. While many of these publications are accompanied by code as well as trained models which is helpful but still leaves a number of steps for users to figure out for themselves. diff --git a/_posts/2019-07-18-pytorch-ecosystem.md b/_posts/2019-07-18-pytorch-ecosystem.md index b0cf03ad50a7..c61de744edb1 100644 --- a/_posts/2019-07-18-pytorch-ecosystem.md +++ b/_posts/2019-07-18-pytorch-ecosystem.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'PyTorch Adds New Ecosystem Projects for Encrypted AI and Quantum Computing, Expands PyTorch Hub' author: Team PyTorch image: /assets/images/bert2.png +tags: [four] --- The PyTorch ecosystem includes projects, tools, models and libraries from a broad community of researchers in academia and industry, application developers, and ML engineers. The goal of this ecosystem is to support, accelerate, and aid in your exploration with PyTorch and help you push the state of the art, no matter what field you are exploring. Similarly, we are expanding the recently launched PyTorch Hub to further help you discover and reproduce the latest research. 
diff --git a/_posts/2019-07-23-mapillary-research.md b/_posts/2019-07-23-mapillary-research.md index e15f87708e40..de2f5ba261c2 100644 --- a/_posts/2019-07-23-mapillary-research.md +++ b/_posts/2019-07-23-mapillary-research.md @@ -4,6 +4,7 @@ title: 'Mapillary Research: Seamless Scene Segmentation and In-Place Activated B author: Lorenzo Porzi, Mapillary redirect_from: /2019/07/23/mapillary-research.html image: /assets/images/bert2.png +tags: [four] --- With roads in developed countries like the US changing up to 15% annually, Mapillary addresses a growing demand for keeping maps updated by combining images from any camera into a 3D visualization of the world. Mapillary's independent and collaborative approach enables anyone to collect, share, and use street-level images for improving maps, developing cities, and advancing the automotive industry. diff --git a/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md b/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md index 06d443b758a5..1aacb55e0aa2 100644 --- a/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md +++ b/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md @@ -4,6 +4,7 @@ title: 'New Releases: PyTorch 1.2, torchtext 0.4, torchaudio 0.3, and torchvisio author: Team PyTorch redirect_from: /2019/08/06/pytorch_aug2019_releases.html image: /assets/images/bert2.png +tags: [four] --- Since the release of PyTorch 1.0, we’ve seen the community expand to add new tools, contribute to a growing set of models available in the PyTorch Hub, and continually increase usage in both research and production. diff --git a/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md b/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md index 95838fe3a275..0eb7a4531c1f 100644 --- a/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md +++ b/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'PyTorch 1.3 adds mobile, privacy, quantization, and named tensors' author: Team PyTorch image: /assets/images/bert2.png +tags: [four] --- PyTorch continues to gain momentum because of its focus on meeting the needs of researchers, its streamlined workflow for production use, and most of all because of the enthusiastic support it has received from the AI community. PyTorch citations in papers on ArXiv [grew 194 percent in the first half of 2019 alone, as noted by O’Reilly](https://www.oreilly.com/ideas/one-simple-graphic-researchers-love-pytorch-and-tensorflow?fbclid=IwAR3kYmlyD7zky37IYFu0cafQn7yemhl8P-7MNyB30z0q5RDzxcTOrP8kxDk), and the number of contributors to the platform has grown more than 50 percent over the last year, to nearly 1,200. Facebook, Microsoft, Uber, and other organizations across industries are increasingly using it as the foundation for their most important machine learning (ML) research and production workloads. 
diff --git a/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md b/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md index db63192b28bc..8ce3af280fc4 100644 --- a/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md +++ b/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md @@ -4,6 +4,7 @@ title: 'OpenMined and PyTorch partner to launch fellowship funding for privacy-p author: Andrew Trask (OpenMined/U.Oxford), Shubho Sengupta, Laurens van der Maaten, Joe Spisak excerpt: Many applications of machine learning (ML) pose a range of security and privacy challenges. image: /assets/images/bert2.png +tags: [four] ---
diff --git a/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md b/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md index 92f1eace1811..d51f001077ac 100644 --- a/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md +++ b/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'PyTorch adds new tools and libraries, welcomes Preferred Networks to its community' author: Team PyTorch image: /assets/images/bert2.png +tags: [four] --- PyTorch continues to be used for the latest state-of-the-art research on display at the NeurIPS conference next week, making up nearly [70% of papers](https://chillee.github.io/pytorch-vs-tensorflow/) that cite a framework. In addition, we’re excited to welcome Preferred Networks, the maintainers of the Chainer framework, to the PyTorch community. Their teams are moving fully over to PyTorch for developing their ML capabilities and services. diff --git a/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md b/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md index d218f78955f5..da87425b0d78 100644 --- a/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md +++ b/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md @@ -4,6 +4,7 @@ title: 'Stochastic Weight Averaging in PyTorch' author: Pavel Izmailov and Andrew Gordon Wilson redirect_from: /2019/04/29/road-to-1.0.html image: /assets/images/bert2.png +tags: [two] --- In this blogpost we describe the recently proposed Stochastic Weight Averaging (SWA) technique [1, 2], and its new implementation in [`torchcontrib`](https://github.com/pytorch/contrib). SWA is a simple procedure that improves generalization in deep learning over Stochastic Gradient Descent (SGD) at no additional cost, and can be used as a drop-in replacement for any other optimizer in PyTorch. SWA has a wide range of applications and features: diff --git a/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md b/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md index 7e7274e5b86e..c985f166b0dd 100644 --- a/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md +++ b/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md @@ -4,6 +4,7 @@ title: "Optimizing CUDA Recurrent Neural Networks with TorchScript" author: "The PyTorch Team" date: 2019-05-01 8:00:00 -0500 image: /assets/images/bert2.png +tags: [three] --- This week, we officially released PyTorch 1.1, a large feature update to PyTorch 1.0. One of the new features we've added is better support for fast, custom Recurrent Neural Networks (fastrnns) with TorchScript (the PyTorch JIT) (https://pytorch.org/docs/stable/jit.html). diff --git a/_posts/2019-5-22-torchvision03.md b/_posts/2019-5-22-torchvision03.md index 409f0ffc766f..c04ffa985268 100644 --- a/_posts/2019-5-22-torchvision03.md +++ b/_posts/2019-5-22-torchvision03.md @@ -4,6 +4,7 @@ title: 'torchvision 0.3: segmentation, detection models, new datasets and more.. author: Francisco Massa redirect_from: /2019/05/23/torchvision03.html image: /assets/images/bert2.png +tags: [three] --- PyTorch domain libraries like torchvision provide convenient access to common datasets and models that can be used to quickly create a state-of-the-art baseline. Moreover, they also provide common abstractions to reduce boilerplate code that users might have to otherwise repeatedly write. 
The torchvision 0.3 release brings several new features including models for semantic segmentation, object detection, instance segmentation, and person keypoint detection, as well as custom C++ / CUDA ops specific to computer vision. diff --git a/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md b/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md index be064e484b36..bddcd28d179f 100644 --- a/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md +++ b/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'Introducing native PyTorch automatic mixed precision for faster training on NVIDIA GPUs' author: Mengdi Huang, Chetan Tekur, Michael Carilli image: /assets/images/bert2.png +tags: [five] --- Most deep learning frameworks, including PyTorch, train with 32-bit floating point (FP32) arithmetic by default. However this is not essential to achieve full accuracy for many deep learning models. In 2017, NVIDIA researchers developed a methodology for [mixed-precision training](https://developer.nvidia.com/blog/mixed-precision-training-deep-neural-networks/), which combined [single-precision](https://blogs.nvidia.com/blog/2019/11/15/whats-the-difference-between-single-double-multi-and-mixed-precision-computing/) (FP32) with half-precision (e.g. FP16) format when training a network, and achieved the same accuracy as FP32 training using the same hyperparameters, with additional performance benefits on NVIDIA GPUs: diff --git a/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md b/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md index bff167952881..b0cc6a309bd7 100644 --- a/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md +++ b/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'Microsoft becomes maintainer of the Windows version of PyTorch' author: Maxim Lukiyanov - Principal PM at Microsoft, Emad Barsoum - Group EM at Microsoft, Guoliang Hua - Principal EM at Microsoft, Nikita Shulga - Tech Lead at Facebook, Geeta Chauhan - PE Lead at Facebook, Chris Gottbrath - Technical PM at Facebook, Jiachen Pu - Engineer at Facebook image: /assets/images/bert2.png +tags: [five] --- Along with the PyTorch 1.6 release, we are excited to announce that Microsoft has expanded its participation in the PyTorch community and is taking ownership of the development and maintenance of the PyTorch build for Windows. diff --git a/_posts/2020-07-28-pytorch-feature-classification-changes.md b/_posts/2020-07-28-pytorch-feature-classification-changes.md index 83615f83c379..1dbb4d5dd7a8 100644 --- a/_posts/2020-07-28-pytorch-feature-classification-changes.md +++ b/_posts/2020-07-28-pytorch-feature-classification-changes.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'PyTorch feature classification changes' author: Team PyTorch image: /assets/images/bert2.png +tags: [six] --- Traditionally features in PyTorch were classified as either stable or experimental with an implicit third option of testing bleeding edge features by building master or through installing nightly builds (available via prebuilt whls). 
This has, in a few cases, caused some confusion around the level of readiness, commitment to the feature and backward compatibility that can be expected from a user perspective. Moving forward, we’d like to better classify the 3 types of features as well as define explicitly here what each mean from a user perspective. diff --git a/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md b/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md index 0c4f3f14c799..485090f8eaca 100644 --- a/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md +++ b/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'Efficient PyTorch I/O library for Large Datasets, Many Files, Many GPUs' author: Alex Aizman, Gavin Maltby, Thomas Breuel image: /assets/images/bert2.png +tags: [six] --- Data sets are growing bigger every day and GPUs are getting faster. This means there are more data sets for deep learning researchers and engineers to train and validate their models. diff --git a/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md b/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md index 6503e3ce4035..8fdee5859d13 100644 --- a/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md +++ b/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md @@ -3,9 +3,12 @@ layout: blog_detail title: 'PyTorch 1.6 now includes Stochastic Weight Averaging' author: Pavel Izmailov, Andrew Gordon Wilson and Vincent Queneneville-Belair image: /assets/images/bert2.png +tags: [six] --- -Do you use stochastic gradient descent (SGD) or Adam? Regardless of the procedure you use to train your neural network, you can likely achieve significantly better generalization at virtually no additional cost with a simple new technique now natively supported in PyTorch 1.6, Stochastic Weight Averaging (SWA) [1]. Even if you have already trained your model, it’s easy to realize the benefits of SWA by running SWA for a small number of epochs starting with a pre-trained model. [Again](https://twitter.com/MilesCranmer/status/1282140440892932096) and [again](https://twitter.com/leopd/status/1285969855062192129), researchers are discovering that SWA improves the performance of well-tuned models in a wide array of practical applications with little cost or effort! +Do you use stochastic gradient descent (SGD) or Adam? Regardless of the procedure you use to train your neural network, you can likely achieve significantly better generalization at virtually no additional cost with a simple new technique now natively supported in PyTorch 1.6, Stochastic Weight Averaging (SWA) [1]. Even if you have already trained your model, it’s easy to realize the benefits of SWA by running SWA for a small number of epochs starting with a pre-trained model. + +[Again](https://twitter.com/MilesCranmer/status/1282140440892932096) and [again](https://twitter.com/leopd/status/1285969855062192129), researchers are discovering that SWA improves the performance of well-tuned models in a wide array of practical applications with little cost or effort! 
SWA has a wide range of applications and features: diff --git a/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md b/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md index e71ccc435661..04c9de986861 100644 --- a/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md +++ b/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'PyTorch 1.4 released, domain libraries updated' author: Team PyTorch image: /assets/images/bert2.png +tags: [five] --- Today, we’re announcing the availability of PyTorch 1.4, along with updates to the PyTorch domain libraries. These releases build on top of the announcements from [NeurIPS 2019](https://pytorch.org/blog/pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community/), where we shared the availability of PyTorch Elastic, a new classification framework for image and video, and the addition of Preferred Networks to the PyTorch community. For those that attended the workshops at NeurIPS, the content can be found [here](https://research.fb.com/neurips-2019-expo-workshops/). diff --git a/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md b/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md index 21b923f9e1d7..8147c848ef4e 100644 --- a/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md +++ b/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'Introduction to Quantization on PyTorch' author: Raghuraman Krishnamoorthi, James Reed, Min Ni, Chris Gottbrath, and Seth Weidman image: /assets/images/bert2.png +tags: [five] --- It’s important to make efficient use of both server-side and on-device compute resources when developing machine learning applications. To support more efficient deployment on servers and edge devices, PyTorch added a support for model quantization using the familiar eager mode Python API. 
diff --git a/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md b/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md index 69cfa9843f19..7a2e4c4a4ae9 100644 --- a/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md +++ b/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'PyTorch 1.5 released, new and updated APIs including C++ frontend API parity with Python' author: Team PyTorch image: /assets/images/bert2.png +tags: [five] --- diff --git a/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md b/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md index 904c03800a0d..ed04da3743f3 100644 --- a/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md +++ b/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'PyTorch library updates including new model serving library ' author: Team PyTorch image: /assets/images/bert2.png +tags: [five] --- diff --git a/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md b/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md index 7fc6210a4b77..c9a91516693c 100644 --- a/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md +++ b/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'Updates & Improvements to PyTorch Tutorials' author: Team PyTorch image: /assets/images/bert2.png +tags: [five] --- PyTorch.org provides researchers and developers with documentation, installation instructions, latest news, community projects, tutorials, and more. Today, we are introducing usability and content improvements including tutorials in additional categories, a new recipe format for quickly referencing common topics, sorting using tags, and an updated homepage. diff --git a/_posts/2020-7-28-pytorch-1.6-released.md b/_posts/2020-7-28-pytorch-1.6-released.md index 5150ea8b854b..b6920fb85526 100644 --- a/_posts/2020-7-28-pytorch-1.6-released.md +++ b/_posts/2020-7-28-pytorch-1.6-released.md @@ -3,6 +3,7 @@ layout: blog_detail title: 'PyTorch 1.6 released w/ Native AMP Support, Microsoft joins as maintainers for Windows' author: Team PyTorch image: /assets/images/bert2.png +tags: [five] --- Today, we’re announcing the availability of PyTorch 1.6, along with updated domain libraries. We are also excited to announce the team at [Microsoft is now maintaining Windows builds and binaries](https://pytorch.org/blog/microsoft-becomes-maintainer-of-the-windows-version-of-pytorch) and will also be supporting the community on GitHub as well as the PyTorch Windows discussion forums. diff --git a/blog/all-posts.html b/blog/all-posts.html index 36048d2667b4..d2ac7eb621c4 100644 --- a/blog/all-posts.html +++ b/blog/all-posts.html @@ -16,12 +16,12 @@
{% for post in posts %} -
+

{{ post.title }}

-                {{ post.excerpt | remove: '<p>' | remove: '</p>' | truncate: 500}}
+                {{ post.excerpt | remove: '<p>' | remove: '</p>' | truncate: 500 | strip_html }}
{{ post.date | date: '%B %d, %Y' }}

{% endfor %} @@ -35,4 +35,4 @@

- + diff --git a/blog/landing-page.html b/blog/landing-page.html index 718d3e045c7c..c166f4c17ad4 100644 --- a/blog/landing-page.html +++ b/blog/landing-page.html @@ -23,8 +23,7 @@

{{ post.title }}

-                {{ post.excerpt | remove: '<p>' | remove: '</p>' | truncate: 500}}
-
+                {{ post.excerpt | remove: '<p>' | remove: '</p>' | truncate: 500 | strip_html }}
{% endfor %}
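For reference, a minimal Liquid sketch of how the front matter and filters introduced in this patch behave; the `sample` variable, the `site.posts` scope, and the comments are illustrative assumptions rather than markup taken from the site:

    {% comment %} Gather the placeholder tags added to each post's front matter. {% endcomment %}
    {% assign all_tags = site.posts | map: "tags" | join: ',' | split: ',' | uniq | sort %}

    {% comment %} strip_html runs after the <p> removals and truncation, so stray markup in an excerpt cannot leak into the listing. {% endcomment %}
    {% assign sample = site.posts.first %}
    {{ sample.excerpt | remove: '<p>' | remove: '</p>' | truncate: 500 | strip_html }}

The same map/join/split/uniq pattern is what the next commit uses to build the list of tags for the sort control.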
From 417980c6e736fd4cd17111dbf37af436b538d850 Mon Sep 17 00:00:00 2001 From: Pat Mellon Date: Fri, 28 Aug 2020 10:09:46 -0400 Subject: [PATCH 06/10] wip - sort button --- blog/all-posts.html | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/blog/all-posts.html b/blog/all-posts.html index d2ac7eb621c4..ba80baa74df1 100644 --- a/blog/all-posts.html +++ b/blog/all-posts.html @@ -12,8 +12,20 @@
-
+ {% assign all_tags = posts | map: "tags" | join: ',' | split: ',' | uniq | sort %} + + +
{% for post in posts %}
From 370cf0ff8559cb30f6a5c5eb91c8213415614fd7 Mon Sep 17 00:00:00 2001 From: Brandon Green Date: Mon, 31 Aug 2020 12:55:32 -0400 Subject: [PATCH 07/10] Add preview to blog posts, style updates This commit adds preview to blog posts and updates style for sort button --- _includes/blog_jumbotron.html | 2 +- .../2017-5-11-a-tour-of-pytorch-internals-1.md | 1 + .../2017-6-27-a-tour-of-pytorch-internals-2.md | 1 + _posts/2018-01-19-a-year-in.md | 1 + _posts/2018-03-5-tensor-comprehensions.md | 1 + .../2018-04-22-pytorch-0_4_0-migration-guide.md | 1 + _posts/2018-05-2-the-road-to-1_0.md | 1 + _posts/2019-05-08-model-serving-in-pyorch.md | 1 + _posts/2019-05-1-pytorch-adds-new-dev-tools.md | 1 + ...rds-reproducible-research-with-pytorch-hub.md | 1 + _posts/2019-07-18-pytorch-ecosystem.md | 1 + _posts/2019-07-23-mapillary-research.md | 1 + ...9-08-08-pytorch-1.2-and-domain-api-release.md | 1 + ...ile-privacy-quantization-and-named-tensors.md | 1 + ...llowship-funding-for-privacy-preserving-ml.md | 3 ++- ...lcomes-preferred-networks-to-its-community.md | 1 + ...-29-stochastic-weight-averaging-in-pytorch.md | 1 + ...9-5-1-optimizing-cuda-rnn-with-torchscript.md | 6 +++--- _posts/2019-5-22-torchvision03.md | 1 + ...pus-with-pytorch-automatic-mixed-precision.md | 3 ++- ...intainer-of-the-windows-version-of-pytorch.md | 1 + ...-28-pytorch-feature-classification-changes.md | 1 + ...ry-for-large-datasets-many-files-many-gpus.md | 1 + ...6-now-includes-stochastic-weight-averaging.md | 1 + ...ot-4-released-and-domain-libraries-updated.md | 1 + ...26-introduction-to-quantization-on-pytorch.md | 1 + ...1-dot-5-released-with-new-and-updated-apis.md | 3 ++- ...-library-updates-new-model-serving-library.md | 1 + ...-updates-improvements-to-pytorch-tutorials.md | 1 + _posts/2020-7-28-pytorch-1.6-released.md | 1 + _sass/blog.scss | 16 ++++++++++++++++ blog/all-posts.html | 2 +- 32 files changed, 52 insertions(+), 8 deletions(-) diff --git a/_includes/blog_jumbotron.html b/_includes/blog_jumbotron.html index 7bf899214e14..04baadb1e7b1 100644 --- a/_includes/blog_jumbotron.html +++ b/_includes/blog_jumbotron.html @@ -5,7 +5,7 @@

{{ post.title }}

-        {{ post.excerpt | remove: '<p>' | remove: '</p>' | truncate: 100 }}
+ Read More diff --git a/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md b/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md index 3ebc97c3262e..acb60f83c730 100644 --- a/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md +++ b/_posts/2017-5-11-a-tour-of-pytorch-internals-1.md @@ -6,6 +6,7 @@ date: 2017-05-11 12:00:00 -0500 redirect_from: /2017/05/11/Internals.html image: /assets/images/bert2.png tags: [one] +preview: 'The fundamental unit in PyTorch is the Tensor. This post will serve as an overview for how we implement Tensors in PyTorch, such that the user can interact with it from the Python shell.' --- The fundamental unit in PyTorch is the Tensor. This post will serve as an overview for how we implement Tensors in PyTorch, such that the user can interact with it from the Python shell. In particular, we want to answer four main questions: diff --git a/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md b/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md index 8452d02ff7a9..00274d07d89c 100644 --- a/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md +++ b/_posts/2017-6-27-a-tour-of-pytorch-internals-2.md @@ -6,6 +6,7 @@ date: 2017-06-27 12:00:00 -0500 redirect_from: /2017/06/27/Internals2.html image: /assets/images/bert2.png tags: [one] +preview: 'In the first I explained how we generate a `torch.Tensor` object that you can use in your Python interpreter. Next, I will explore the build system for PyTorch.' --- In the first [post]({{ site.baseurl }}{% link _posts/2017-5-11-a-tour-of-pytorch-internals-1.md %}) I explained how we generate a `torch.Tensor` object that you can use in your Python interpreter. Next, I will explore the build system for PyTorch. The PyTorch codebase has a variety of components: diff --git a/_posts/2018-01-19-a-year-in.md b/_posts/2018-01-19-a-year-in.md index 25d213842698..425c3c3bb498 100644 --- a/_posts/2018-01-19-a-year-in.md +++ b/_posts/2018-01-19-a-year-in.md @@ -6,6 +6,7 @@ date: 2018-01-19 12:00:00 -0500 redirect_from: /2018/01/19/a-year-in.html image: /assets/images/bert2.png tags: [one] +preview: Today marks 1 year since PyTorch was released publicly. It's been a wild ride — our quest to build a flexible deep learning research platform. Over the last year, we've seen an amazing community of people using, contributing to and evangelizing PyTorch — thank you for the love. --- Today marks 1 year since PyTorch was released publicly. It's been a wild ride — our quest to build a flexible deep learning research platform. Over the last year, we've seen an amazing community of people using, contributing to and evangelizing PyTorch — thank you for the love. diff --git a/_posts/2018-03-5-tensor-comprehensions.md b/_posts/2018-03-5-tensor-comprehensions.md index 5b2391731581..c1e8b0afb124 100644 --- a/_posts/2018-03-5-tensor-comprehensions.md +++ b/_posts/2018-03-5-tensor-comprehensions.md @@ -5,6 +5,7 @@ author: Priya Goyal (FAIR), Nicolas Vasilache (FAIR), Oleksandr Zinenko (Inria & redirect_from: /2018/03/05/tensor-comprehensions.html image: /assets/images/bert2.png tags: [two] +preview: 'Tensor Comprehensions (TC) is a tool that lowers the barrier for writing high-performance code. It generates GPU code from a simple high-level language and autotunes the code for specific input sizes.' --- Tensor Comprehensions (TC) is a tool that lowers the barrier for writing high-performance code. It generates GPU code from a simple high-level language and autotunes the code for specific input sizes. 
diff --git a/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md b/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md index 0934bc62758b..0c06db51cbf7 100644 --- a/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md +++ b/_posts/2018-04-22-pytorch-0_4_0-migration-guide.md @@ -4,6 +4,7 @@ title: 'PyTorch 0.4.0 Migration Guide' redirect_from: /2018/04/22/0_4_0-migration-guide.html image: /assets/images/bert2.png tags: [two] +preview: 'Welcome to the migration guide for PyTorch 0.4.0. In this release we introduced [many exciting new features and critical bug fixes](https://github.com/pytorch/pytorch/releases/tag/v0.4.0), with the goal of providing users a better and cleaner interface.' --- Welcome to the migration guide for PyTorch 0.4.0. In this release we introduced [many exciting new features and critical bug fixes](https://github.com/pytorch/pytorch/releases/tag/v0.4.0), with the goal of providing users a better and cleaner interface. In this guide, we will cover the most important changes in migrating existing code from previous versions: diff --git a/_posts/2018-05-2-the-road-to-1_0.md b/_posts/2018-05-2-the-road-to-1_0.md index f5640bcefb5e..9341e9c6c41c 100644 --- a/_posts/2018-05-2-the-road-to-1_0.md +++ b/_posts/2018-05-2-the-road-to-1_0.md @@ -5,6 +5,7 @@ author: The PyTorch Team redirect_from: /2018/05/02/road-to-1.0.html image: /assets/images/bert2.png tags: [two] +preview: We would like to give you a preview of the roadmap for PyTorch 1.0 , the next release of PyTorch. Over the last year, we've had 0.2, 0.3 and 0.4 transform PyTorch from a [Torch+Chainer]-like interface into something cleaner, adding double-backwards, numpy-like functions, advanced indexing and removing Variable boilerplate. --- We would like to give you a preview of the roadmap for PyTorch 1.0 , the next release of PyTorch. Over the last year, we've had 0.2, 0.3 and 0.4 transform PyTorch from a [Torch+Chainer]-like interface into something cleaner, adding double-backwards, numpy-like functions, advanced indexing and removing Variable boilerplate. At this time, we're confident that the API is in a reasonable and stable state to confidently release a 1.0. diff --git a/_posts/2019-05-08-model-serving-in-pyorch.md b/_posts/2019-05-08-model-serving-in-pyorch.md index 264bbccfe2dd..08887466dda9 100644 --- a/_posts/2019-05-08-model-serving-in-pyorch.md +++ b/_posts/2019-05-08-model-serving-in-pyorch.md @@ -5,6 +5,7 @@ author: Jeff Smith redirect_from: /2019/05/08/model-serving-in-pyorch.html image: /assets/images/bert2.png tags: [three] +preview: 'PyTorch has seen a lot of adoption in research, but people can get confused about how well PyTorch models can be taken into production. This blog post is meant to clear up any confusion people might have about the road to production in PyTorch. Usually when people talk about taking a model “to production,” they usually mean performing **inference**, sometimes called model evaluation or prediction or serving. At the level of a function call, in PyTorch, inference looks something like this:' --- PyTorch has seen a lot of adoption in research, but people can get confused about how well PyTorch models can be taken into production. This blog post is meant to clear up any confusion people might have about the road to production in PyTorch. 
diff --git a/_posts/2019-05-1-pytorch-adds-new-dev-tools.md b/_posts/2019-05-1-pytorch-adds-new-dev-tools.md index 0db155bc65f3..ee237d6609e6 100644 --- a/_posts/2019-05-1-pytorch-adds-new-dev-tools.md +++ b/_posts/2019-05-1-pytorch-adds-new-dev-tools.md @@ -4,6 +4,7 @@ title: 'PyTorch adds new dev tools as it hits production scale' author: The PyTorch Team image: /assets/images/bert2.png tags: [three] +preview: Since its release just a few months ago, PyTorch 1.0 has been rapidly adopted as a powerful, flexible deep learning platform that enables engineers and researchers to move quickly from research to production. We are highlighting some of the ways the AI engineering and research community is using PyTorch 1.0. We’re also sharing new details about the latest release, PyTorch 1.1, and showcasing some of the new development tools created by the community. --- _This is a partial re-post of the original blog post on the Facebook AI Blog. The full post can be [viewed here](https://ai.facebook.com/blog/pytorch-adds-new-dev-tools-as-it-hits-production-scale/)_ diff --git a/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md b/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md index 510f98af3835..3aac26101334 100644 --- a/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md +++ b/_posts/2019-06-10-towards-reproducible-research-with-pytorch-hub.md @@ -5,6 +5,7 @@ author: Team PyTorch redirect_from: /2019/06/10/pytorch_hub.html image: /assets/images/bert2.png tags: [three] +preview: 'Reproducibility is an essential requirement for many fields of research including those based on machine learning techniques. However, many machine learning publications are either not reproducible or are difficult to reproduce. With the continued growth in the number of research publications, including tens of thousands of papers now hosted on arXiv and submissions to conferences at an all time high, research reproducibility is more important than ever. While many of these publications are accompanied by code as well as trained models which is helpful but still leaves a number of steps for users to figure out for themselves.' --- Reproducibility is an essential requirement for many fields of research including those based on machine learning techniques. However, many machine learning publications are either not reproducible or are difficult to reproduce. With the continued growth in the number of research publications, including tens of thousands of papers now hosted on arXiv and submissions to conferences at an all time high, research reproducibility is more important than ever. While many of these publications are accompanied by code as well as trained models which is helpful but still leaves a number of steps for users to figure out for themselves. diff --git a/_posts/2019-07-18-pytorch-ecosystem.md b/_posts/2019-07-18-pytorch-ecosystem.md index c61de744edb1..e959065b6542 100644 --- a/_posts/2019-07-18-pytorch-ecosystem.md +++ b/_posts/2019-07-18-pytorch-ecosystem.md @@ -4,6 +4,7 @@ title: 'PyTorch Adds New Ecosystem Projects for Encrypted AI and Quantum Computi author: Team PyTorch image: /assets/images/bert2.png tags: [four] +preview: 'The PyTorch ecosystem includes projects, tools, models and libraries from a broad community of researchers in academia and industry, application developers, and ML engineers. 
The goal of this ecosystem is to support, accelerate, and aid in your exploration with PyTorch and help you push the state of the art, no matter what field you are exploring. Similarly, we are expanding the recently launched PyTorch Hub to further help you discover and reproduce the latest research.' --- The PyTorch ecosystem includes projects, tools, models and libraries from a broad community of researchers in academia and industry, application developers, and ML engineers. The goal of this ecosystem is to support, accelerate, and aid in your exploration with PyTorch and help you push the state of the art, no matter what field you are exploring. Similarly, we are expanding the recently launched PyTorch Hub to further help you discover and reproduce the latest research. diff --git a/_posts/2019-07-23-mapillary-research.md b/_posts/2019-07-23-mapillary-research.md index de2f5ba261c2..8f840c4ef921 100644 --- a/_posts/2019-07-23-mapillary-research.md +++ b/_posts/2019-07-23-mapillary-research.md @@ -5,6 +5,7 @@ author: Lorenzo Porzi, Mapillary redirect_from: /2019/07/23/mapillary-research.html image: /assets/images/bert2.png tags: [four] +preview: With roads in developed countries like the US changing up to 15% annually, Mapillary addresses a growing demand for keeping maps updated by combining images from any camera into a 3D visualization of the world. Mapillary's independent and collaborative approach enables anyone to collect, share, and use street-level images for improving maps, developing cities, and advancing the automotive industry. --- With roads in developed countries like the US changing up to 15% annually, Mapillary addresses a growing demand for keeping maps updated by combining images from any camera into a 3D visualization of the world. Mapillary's independent and collaborative approach enables anyone to collect, share, and use street-level images for improving maps, developing cities, and advancing the automotive industry. diff --git a/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md b/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md index 1aacb55e0aa2..11a33b6fb585 100644 --- a/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md +++ b/_posts/2019-08-08-pytorch-1.2-and-domain-api-release.md @@ -5,6 +5,7 @@ author: Team PyTorch redirect_from: /2019/08/06/pytorch_aug2019_releases.html image: /assets/images/bert2.png tags: [four] +preview: 'Since the release of PyTorch 1.0, we’ve seen the community expand to add new tools, contribute to a growing set of models available in the PyTorch Hub, and continually increase usage in both research and production.' --- Since the release of PyTorch 1.0, we’ve seen the community expand to add new tools, contribute to a growing set of models available in the PyTorch Hub, and continually increase usage in both research and production.
diff --git a/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md b/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md index 0eb7a4531c1f..4f064da9e039 100644 --- a/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md +++ b/_posts/2019-10-10-pytorch-1-dot-3-adds-mobile-privacy-quantization-and-named-tensors.md @@ -4,6 +4,7 @@ title: 'PyTorch 1.3 adds mobile, privacy, quantization, and named tensors' author: Team PyTorch image: /assets/images/bert2.png tags: [four] +preview: 'PyTorch continues to gain momentum because of its focus on meeting the needs of researchers, its streamlined workflow for production use, and most of all because of the enthusiastic support it has received from the AI community. PyTorch citations in papers on ArXiv [grew 194 percent in the first half of 2019 alone, as noted by O’Reilly](https://www.oreilly.com/ideas/one-simple-graphic-researchers-love-pytorch-and-tensorflow?fbclid=IwAR3kYmlyD7zky37IYFu0cafQn7yemhl8P-7MNyB30z0q5RDzxcTOrP8kxDk), and the number of contributors to the platform has grown more than 50 percent over the last year, to nearly 1,200. Facebook, Microsoft, Uber, and other organizations across industries are increasingly using it as the foundation for their most important machine learning (ML) research and production workloads.' --- PyTorch continues to gain momentum because of its focus on meeting the needs of researchers, its streamlined workflow for production use, and most of all because of the enthusiastic support it has received from the AI community. PyTorch citations in papers on ArXiv [grew 194 percent in the first half of 2019 alone, as noted by O’Reilly](https://www.oreilly.com/ideas/one-simple-graphic-researchers-love-pytorch-and-tensorflow?fbclid=IwAR3kYmlyD7zky37IYFu0cafQn7yemhl8P-7MNyB30z0q5RDzxcTOrP8kxDk), and the number of contributors to the platform has grown more than 50 percent over the last year, to nearly 1,200. Facebook, Microsoft, Uber, and other organizations across industries are increasingly using it as the foundation for their most important machine learning (ML) research and production workloads. diff --git a/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md b/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md index 8ce3af280fc4..3642ccfc4352 100644 --- a/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md +++ b/_posts/2019-12-06-openmined-and-pytorch-launch-fellowship-funding-for-privacy-preserving-ml.md @@ -4,7 +4,8 @@ title: 'OpenMined and PyTorch partner to launch fellowship funding for privacy-p author: Andrew Trask (OpenMined/U.Oxford), Shubho Sengupta, Laurens van der Maaten, Joe Spisak excerpt: Many applications of machine learning (ML) pose a range of security and privacy challenges. image: /assets/images/bert2.png -tags: [four] +tags: [red] +preview: Many applications of machine learning (ML) pose a range of security and privacy challenges. In particular, users may not be willing or allowed to share their data, which prevents them from taking full advantage of ML platforms like PyTorch. To take the field of privacy-preserving ML (PPML) forward, OpenMined and PyTorch are announcing plans to jointly develop a combined platform to accelerate PPML research as well as new funding for fellowships.' ---
diff --git a/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md b/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md index d51f001077ac..01d07886b4b2 100644 --- a/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md +++ b/_posts/2019-12-06-pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community.md @@ -4,6 +4,7 @@ title: 'PyTorch adds new tools and libraries, welcomes Preferred Networks to its author: Team PyTorch image: /assets/images/bert2.png tags: [four] +preview: 'PyTorch continues to be used for the latest state-of-the-art research on display at the NeurIPS conference next week, making up nearly [70% of papers](https://chillee.github.io/pytorch-vs-tensorflow/) that cite a framework. In addition, we’re excited to welcome Preferred Networks, the maintainers of the Chainer framework, to the PyTorch community. Their teams are moving fully over to PyTorch for developing their ML capabilities and services.' --- PyTorch continues to be used for the latest state-of-the-art research on display at the NeurIPS conference next week, making up nearly [70% of papers](https://chillee.github.io/pytorch-vs-tensorflow/) that cite a framework. In addition, we’re excited to welcome Preferred Networks, the maintainers of the Chainer framework, to the PyTorch community. Their teams are moving fully over to PyTorch for developing their ML capabilities and services. diff --git a/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md b/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md index da87425b0d78..9c7acd9dc3fe 100644 --- a/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md +++ b/_posts/2019-4-29-stochastic-weight-averaging-in-pytorch.md @@ -5,6 +5,7 @@ author: Pavel Izmailov and Andrew Gordon Wilson redirect_from: /2019/04/29/road-to-1.0.html image: /assets/images/bert2.png tags: [two] +preview: 'In this blogpost we describe the recently proposed Stochastic Weight Averaging (SWA) technique [1, 2], and its new implementation in [`torchcontrib`](https://github.com/pytorch/contrib). SWA is a simple procedure that improves generalization in deep learning over Stochastic Gradient Descent (SGD) at no additional cost, and can be used as a drop-in replacement for any other optimizer in PyTorch.' --- In this blogpost we describe the recently proposed Stochastic Weight Averaging (SWA) technique [1, 2], and its new implementation in [`torchcontrib`](https://github.com/pytorch/contrib). SWA is a simple procedure that improves generalization in deep learning over Stochastic Gradient Descent (SGD) at no additional cost, and can be used as a drop-in replacement for any other optimizer in PyTorch. 
SWA has a wide range of applications and features: diff --git a/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md b/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md index c985f166b0dd..4b68a412daa6 100644 --- a/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md +++ b/_posts/2019-5-1-optimizing-cuda-rnn-with-torchscript.md @@ -1,10 +1,10 @@ --- layout: blog_detail -title: "Optimizing CUDA Recurrent Neural Networks with TorchScript" -author: "The PyTorch Team" -date: 2019-05-01 8:00:00 -0500 +title: 'Optimizing CUDA Recurrent Neural Networks with TorchScript' +author: The PyTorch Team image: /assets/images/bert2.png tags: [three] +preview: This week, we officially released PyTorch 1.1, a large feature update to PyTorch 1.0. One of the new features we've added is better support for fast, custom Recurrent Neural Networks (fastrnns) with TorchScript (the PyTorch JIT) (https://pytorch.org/docs/stable/jit.html) --- This week, we officially released PyTorch 1.1, a large feature update to PyTorch 1.0. One of the new features we've added is better support for fast, custom Recurrent Neural Networks (fastrnns) with TorchScript (the PyTorch JIT) (https://pytorch.org/docs/stable/jit.html). diff --git a/_posts/2019-5-22-torchvision03.md b/_posts/2019-5-22-torchvision03.md index c04ffa985268..1a4bd9330ac0 100644 --- a/_posts/2019-5-22-torchvision03.md +++ b/_posts/2019-5-22-torchvision03.md @@ -5,6 +5,7 @@ author: Francisco Massa redirect_from: /2019/05/23/torchvision03.html image: /assets/images/bert2.png tags: [three] +preview: 'PyTorch domain libraries like torchvision provide convenient access to common datasets and models that can be used to quickly create a state-of-the-art baseline. Moreover, they also provide common abstractions to reduce boilerplate code that users might have to otherwise repeatedly write. The torchvision 0.3 release brings several new features including models for semantic segmentation, object detection, instance segmentation, and person keypoint detection, as well as custom C++ / CUDA ops specific to computer vision.' --- PyTorch domain libraries like torchvision provide convenient access to common datasets and models that can be used to quickly create a state-of-the-art baseline. Moreover, they also provide common abstractions to reduce boilerplate code that users might have to otherwise repeatedly write. The torchvision 0.3 release brings several new features including models for semantic segmentation, object detection, instance segmentation, and person keypoint detection, as well as custom C++ / CUDA ops specific to computer vision. diff --git a/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md b/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md index bddcd28d179f..a79ba620b99f 100644 --- a/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md +++ b/_posts/2020-07-28-accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision.md @@ -4,9 +4,10 @@ title: 'Introducing native PyTorch automatic mixed precision for faster training author: Mengdi Huang, Chetan Tekur, Michael Carilli image: /assets/images/bert2.png tags: [five] +preview: 'Most deep learning frameworks, including PyTorch, train with 32-bit floating point (FP32) arithmetic by default. However this is not essential to achieve full accuracy for many deep learning models. 
In 2017, NVIDIA researchers developed a methodology for (FP32) with half-precision (e.g. FP16) format when training a network, and achieved the same accuracy as FP32 training using the same hyperparameters, with additional performance benefits on NVIDIA GPUs:' --- -Most deep learning frameworks, including PyTorch, train with 32-bit floating point (FP32) arithmetic by default. However this is not essential to achieve full accuracy for many deep learning models. In 2017, NVIDIA researchers developed a methodology for [mixed-precision training](https://developer.nvidia.com/blog/mixed-precision-training-deep-neural-networks/), which combined [single-precision](https://blogs.nvidia.com/blog/2019/11/15/whats-the-difference-between-single-double-multi-and-mixed-precision-computing/) (FP32) with half-precision (e.g. FP16) format when training a network, and achieved the same accuracy as FP32 training using the same hyperparameters, with additional performance benefits on NVIDIA GPUs: +Most deep learning frameworks, including PyTorch, train with 32-bit floating point (FP32) arithmetic by default. However this is not essential to achieve full accuracy for many deep learning models. In 2017, NVIDIA researchers developed a methodology for (FP32) with half-precision (e.g. FP16) format when training a network, and achieved the same accuracy as FP32 training using the same hyperparameters, with additional performance benefits on NVIDIA GPUs: * Shorter training time; * Lower memory requirements, enabling larger batch sizes, larger models, or larger inputs. diff --git a/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md b/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md index b0cc6a309bd7..9f67193bdf09 100644 --- a/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md +++ b/_posts/2020-07-28-microsoft-becomes-maintainer-of-the-windows-version-of-pytorch.md @@ -4,6 +4,7 @@ title: 'Microsoft becomes maintainer of the Windows version of PyTorch' author: Maxim Lukiyanov - Principal PM at Microsoft, Emad Barsoum - Group EM at Microsoft, Guoliang Hua - Principal EM at Microsoft, Nikita Shulga - Tech Lead at Facebook, Geeta Chauhan - PE Lead at Facebook, Chris Gottbrath - Technical PM at Facebook, Jiachen Pu - Engineer at Facebook image: /assets/images/bert2.png tags: [five] +preview: 'Along with the PyTorch 1.6 release, we are excited to announce that Microsoft has expanded its participation in the PyTorch community and is taking ownership of the development and maintenance of the PyTorch build for Windows.' --- Along with the PyTorch 1.6 release, we are excited to announce that Microsoft has expanded its participation in the PyTorch community and is taking ownership of the development and maintenance of the PyTorch build for Windows. diff --git a/_posts/2020-07-28-pytorch-feature-classification-changes.md b/_posts/2020-07-28-pytorch-feature-classification-changes.md index 1dbb4d5dd7a8..13c850b83453 100644 --- a/_posts/2020-07-28-pytorch-feature-classification-changes.md +++ b/_posts/2020-07-28-pytorch-feature-classification-changes.md @@ -4,6 +4,7 @@ title: 'PyTorch feature classification changes' author: Team PyTorch image: /assets/images/bert2.png tags: [six] +preview: 'Traditionally features in PyTorch were classified as either stable or experimental with an implicit third option of testing bleeding edge features by building master or through installing nightly builds (available via prebuilt whls). 
This has, in a few cases, caused some confusion around the level of readiness, commitment to the feature and backward compatibility that can be expected from a user perspective. Moving forward, we’d like to better classify the 3 types of features as well as define explicitly here what each mean from a user perspective.' --- Traditionally features in PyTorch were classified as either stable or experimental with an implicit third option of testing bleeding edge features by building master or through installing nightly builds (available via prebuilt whls). This has, in a few cases, caused some confusion around the level of readiness, commitment to the feature and backward compatibility that can be expected from a user perspective. Moving forward, we’d like to better classify the 3 types of features as well as define explicitly here what each mean from a user perspective. diff --git a/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md b/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md index 485090f8eaca..356940a81d7d 100644 --- a/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md +++ b/_posts/2020-08-11-efficient-pytorch-io-library-for-large-datasets-many-files-many-gpus.md @@ -4,6 +4,7 @@ title: 'Efficient PyTorch I/O library for Large Datasets, Many Files, Many GPUs' author: Alex Aizman, Gavin Maltby, Thomas Breuel image: /assets/images/bert2.png tags: [six] +preview: 'Data sets are growing bigger every day and GPUs are getting faster. This means there are more data sets for deep learning researchers and engineers to train and validate their models.' --- Data sets are growing bigger every day and GPUs are getting faster. This means there are more data sets for deep learning researchers and engineers to train and validate their models. diff --git a/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md b/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md index 8fdee5859d13..047b208eada7 100644 --- a/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md +++ b/_posts/2020-08-18-pytorch-1.6-now-includes-stochastic-weight-averaging.md @@ -4,6 +4,7 @@ title: 'PyTorch 1.6 now includes Stochastic Weight Averaging' author: Pavel Izmailov, Andrew Gordon Wilson and Vincent Queneneville-Belair image: /assets/images/bert2.png tags: [six] +preview: 'Do you use stochastic gradient descent (SGD) or Adam? Regardless of the procedure you use to train your neural network, you can likely achieve significantly better generalization at virtually no additional cost with a simple new technique now natively supported in PyTorch 1.6, Stochastic Weight Averaging (SWA) [1]. Even if you have already trained your model, it’s easy to realize the benefits of SWA by running SWA for a small number of epochs starting with a pre-trained model.' --- Do you use stochastic gradient descent (SGD) or Adam? Regardless of the procedure you use to train your neural network, you can likely achieve significantly better generalization at virtually no additional cost with a simple new technique now natively supported in PyTorch 1.6, Stochastic Weight Averaging (SWA) [1]. Even if you have already trained your model, it’s easy to realize the benefits of SWA by running SWA for a small number of epochs starting with a pre-trained model. 
diff --git a/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md b/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md index 04c9de986861..dd1de7d70f8b 100644 --- a/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md +++ b/_posts/2020-1-15-pytorch-1-dot-4-released-and-domain-libraries-updated.md @@ -4,6 +4,7 @@ title: 'PyTorch 1.4 released, domain libraries updated' author: Team PyTorch image: /assets/images/bert2.png tags: [five] +preview: 'Today, we’re announcing the availability of PyTorch 1.4, along with updates to the PyTorch domain libraries. These releases build on top of the announcements from [NeurIPS 2019](https://pytorch.org/blog/pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community/), where we shared the availability of PyTorch Elastic, a new classification framework for image and video, and the addition of Preferred Networks to the PyTorch community. For those that attended the workshops at NeurIPS, the content can be found [here](https://research.fb.com/neurips-2019-expo-workshops/).' --- Today, we’re announcing the availability of PyTorch 1.4, along with updates to the PyTorch domain libraries. These releases build on top of the announcements from [NeurIPS 2019](https://pytorch.org/blog/pytorch-adds-new-tools-and-libraries-welcomes-preferred-networks-to-its-community/), where we shared the availability of PyTorch Elastic, a new classification framework for image and video, and the addition of Preferred Networks to the PyTorch community. For those that attended the workshops at NeurIPS, the content can be found [here](https://research.fb.com/neurips-2019-expo-workshops/). diff --git a/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md b/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md index 8147c848ef4e..7dd77f23efd2 100644 --- a/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md +++ b/_posts/2020-3-26-introduction-to-quantization-on-pytorch.md @@ -4,6 +4,7 @@ title: 'Introduction to Quantization on PyTorch' author: Raghuraman Krishnamoorthi, James Reed, Min Ni, Chris Gottbrath, and Seth Weidman image: /assets/images/bert2.png tags: [five] +preview: 'It’s important to make efficient use of both server-side and on-device compute resources when developing machine learning applications. To support more efficient deployment on servers and edge devices, PyTorch added a support for model quantization using the familiar eager mode Python API.' --- It’s important to make efficient use of both server-side and on-device compute resources when developing machine learning applications. To support more efficient deployment on servers and edge devices, PyTorch added a support for model quantization using the familiar eager mode Python API. diff --git a/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md b/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md index 7a2e4c4a4ae9..1793dafd2b12 100644 --- a/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md +++ b/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md @@ -3,7 +3,8 @@ layout: blog_detail title: 'PyTorch 1.5 released, new and updated APIs including C++ frontend API parity with Python' author: Team PyTorch image: /assets/images/bert2.png -tags: [five] +tags: [yellow] +preview: 'Today, we’re announcing the availability of PyTorch 1.5, along with new and updated libraries. 
diff --git a/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md b/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md
index 7a2e4c4a4ae9..1793dafd2b12 100644
--- a/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md
+++ b/_posts/2020-4-21-pytorch-1-dot-5-released-with-new-and-updated-apis.md
@@ -3,7 +3,8 @@ layout: blog_detail
 title: 'PyTorch 1.5 released, new and updated APIs including C++ frontend API parity with Python'
 author: Team PyTorch
 image: /assets/images/bert2.png
-tags: [five]
+tags: [yellow]
+preview: 'Today, we’re announcing the availability of PyTorch 1.5, along with new and updated libraries. This release includes several major new API additions and improvements. PyTorch now includes a significant update to the C++ frontend, ‘channels last’ memory format for computer vision models, and a stable release of the distributed RPC framework used for model-parallel training. The release also has new APIs for autograd for hessians and jacobians, and an API that allows the creation of Custom C++ Classes that was inspired by pybind.'
 ---
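For the hessian and jacobian APIs that preview mentions, a minimal sketch (with a made-up scalar function, not taken from the release notes) looks like this:

```python
import torch
from torch.autograd.functional import jacobian, hessian

# Made-up scalar-valued function of a vector input.
def f(x):
    return (x ** 3).sum()

x = torch.randn(4)
print(jacobian(f, x))  # gradient of the scalar output, same shape as x
print(hessian(f, x))   # 4 x 4 matrix of second derivatives
```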
diff --git a/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md b/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md
index ed04da3743f3..e6f875f8333d 100644
--- a/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md
+++ b/_posts/2020-4-21-pytorch-library-updates-new-model-serving-library.md
@@ -4,6 +4,7 @@ title: 'PyTorch library updates including new model serving library '
 author: Team PyTorch
 image: /assets/images/bert2.png
 tags: [five]
+preview: 'Along with the PyTorch 1.5 release, we are announcing new libraries for high-performance PyTorch model serving and tight integration with TorchElastic and Kubernetes. Additionally, we are releasing updated packages for torch_xla (Google Cloud TPUs), torchaudio, torchvision, and torchtext. All of these new libraries and enhanced capabilities are available today and accompany all of the core features [released in PyTorch 1.5](https://pytorch.org/blog/pytorch-1-dot-5-released-with-new-and-updated-apis).'
 ---
diff --git a/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md b/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md
index c9a91516693c..e5fbffbef2ed 100644
--- a/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md
+++ b/_posts/2020-5-5-updates-improvements-to-pytorch-tutorials.md
@@ -4,6 +4,7 @@ title: 'Updates & Improvements to PyTorch Tutorials'
 author: Team PyTorch
 image: /assets/images/bert2.png
 tags: [five]
+preview: 'PyTorch.org provides researchers and developers with documentation, installation instructions, latest news, community projects, tutorials, and more. Today, we are introducing usability and content improvements including tutorials in additional categories, a new recipe format for quickly referencing common topics, sorting using tags, and an updated homepage.'
 ---
 
 PyTorch.org provides researchers and developers with documentation, installation instructions, latest news, community projects, tutorials, and more. Today, we are introducing usability and content improvements including tutorials in additional categories, a new recipe format for quickly referencing common topics, sorting using tags, and an updated homepage.
diff --git a/_posts/2020-7-28-pytorch-1.6-released.md b/_posts/2020-7-28-pytorch-1.6-released.md
index b6920fb85526..1bf36e61e665 100644
--- a/_posts/2020-7-28-pytorch-1.6-released.md
+++ b/_posts/2020-7-28-pytorch-1.6-released.md
@@ -4,6 +4,7 @@ title: 'PyTorch 1.6 released w/ Native AMP Support, Microsoft joins as maintaine
 author: Team PyTorch
 image: /assets/images/bert2.png
 tags: [five]
+preview: 'Today, we’re announcing the availability of PyTorch 1.6, along with updated domain libraries. We are also excited to announce the team at [Microsoft is now maintaining Windows builds and binaries](https://pytorch.org/blog/microsoft-becomes-maintainer-of-the-windows-version-of-pytorch) and will also be supporting the community on GitHub as well as the PyTorch Windows discussion forums.'
 ---
 
 Today, we’re announcing the availability of PyTorch 1.6, along with updated domain libraries. We are also excited to announce the team at [Microsoft is now maintaining Windows builds and binaries](https://pytorch.org/blog/microsoft-becomes-maintainer-of-the-windows-version-of-pytorch) and will also be supporting the community on GitHub as well as the PyTorch Windows discussion forums.
diff --git a/_sass/blog.scss b/_sass/blog.scss
index ac172115134e..6f7677c74518 100644
--- a/_sass/blog.scss
+++ b/_sass/blog.scss
@@ -239,6 +239,22 @@
     }
   }
 
+  #dropdownMenuButton {
+    cursor: pointer;
+    z-index: 1;
+    top: inherit;
+    left: 23%;
+    max-width: 4rem;
+    border: none;
+    background: inherit;
+    padding: inherit;
+  }
+
+  .dropdown-item:hover {
+    color: $orange;
+    cursor: pointer;
+  }
+
   @media (max-width: 1067px) {
     .jumbotron {
       h1 {
diff --git a/blog/all-posts.html b/blog/all-posts.html
index ba80baa74df1..e66a58077ea1 100644
--- a/blog/all-posts.html
+++ b/blog/all-posts.html
@@ -33,7 +33,7 @@
         {{ post.title }}
-        {{ post.excerpt | remove: '<p>' | remove: '</p>' | truncate: 500 | strip_html }}
+        {{ post.preview | truncate: 150 }}
         {{ post.date | date: '%B %d, %Y' }}
       {% endfor %}

From e2ea2afe92b15539addda5cbc1795fed47f45dc3 Mon Sep 17 00:00:00 2001
From: Brandon Green
Date: Fri, 18 Sep 2020 09:48:38 -0400
Subject: [PATCH 08/10] Update position of sort button

---
 _sass/blog.scss | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/_sass/blog.scss b/_sass/blog.scss
index 6f7677c74518..7d439dc16cd1 100644
--- a/_sass/blog.scss
+++ b/_sass/blog.scss
@@ -239,11 +239,17 @@
     }
   }
 
+  .dropdown {
+    margin-bottom: 3rem;
+  }
+
   #dropdownMenuButton {
     cursor: pointer;
+    position: absolute;
+    right: 0;
+    bottom: 1rem;
     z-index: 1;
     top: inherit;
-    left: 23%;
     max-width: 4rem;
     border: none;
     background: inherit;

From 4e15b8df8d7f329741ac5802346a0a68531b3826 Mon Sep 17 00:00:00 2001
From: Pat Mellon
Date: Thu, 24 Sep 2020 15:01:07 -0400
Subject: [PATCH 09/10] Sort posts based on tag

---
 assets/filter-hub-tags.js | 34 ++++++++++++++++++++++++++++++----
 blog/all-posts.html       |  2 +-
 2 files changed, 31 insertions(+), 5 deletions(-)

diff --git a/assets/filter-hub-tags.js b/assets/filter-hub-tags.js
index 65e59f0339d0..2a96db7ddf4e 100644
--- a/assets/filter-hub-tags.js
+++ b/assets/filter-hub-tags.js
@@ -2,11 +2,21 @@ var filterScript = $("script[src*=filter-hub-tags]");
 var listId = filterScript.attr("list-id");
 var displayCount = Number(filterScript.attr("display-count"));
 var pagination = filterScript.attr("pagination");
+var options;
+
+if (listId == "all-blog-posts") {
+  options = {
+    valueNames: [{ data: ["tags"] }],
+    page: displayCount
+  };
+}
+else {
+  options = {
+    valueNames: ["github-stars-count-whole-number", { data: ["tags", "date-added", "title"] }],
+    page: displayCount
+  };
+}
 
-var options = {
-  valueNames: ["github-stars-count-whole-number", { data: ["tags", "date-added", "title"] }],
-  page: displayCount
-};
 
 $(".next-news-item").on("click" , function(){
   $(".pagination").find(".active").next().trigger( "click" );
@@ -101,3 +111,19 @@ $("#sortTitleLow").on("click", function() {
 $("#sortTitleHigh").on("click", function() {
   hubList.sort("title", { order: "asc" });
 });
+
+// Filter the blog posts based on the selected tag
+
+$(".blog-filter-btn").on("click", function() {
+  filterBlogPosts($(this).data("tag"));
+});
+
+function filterBlogPosts(tag) {
+  hubList.filter(function(item) {
+    if (item.values().tags == tag) {
+        return true;
+    } else {
+        return false;
+    }
+  })
+}
diff --git a/blog/all-posts.html b/blog/all-posts.html
index e66a58077ea1..a9b442c493e4 100644
--- a/blog/all-posts.html
+++ b/blog/all-posts.html
@@ -20,7 +20,7 @@
From 0d547f21e1271ba7ab17605055a29f0626546b0e Mon Sep 17 00:00:00 2001
From: Pat Mellon
Date: Thu, 24 Sep 2020 15:02:28 -0400
Subject: [PATCH 10/10] Clean up JS

---
 assets/filter-hub-tags.js | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/assets/filter-hub-tags.js b/assets/filter-hub-tags.js
index 2a96db7ddf4e..dfb5cd80e99f 100644
--- a/assets/filter-hub-tags.js
+++ b/assets/filter-hub-tags.js
@@ -119,11 +119,11 @@ $(".blog-filter-btn").on("click", function() {
 });
 
 function filterBlogPosts(tag) {
-  hubList.filter(function(item) {
+  hubList.filter(function (item) {
     if (item.values().tags == tag) {
-        return true;
+      return true;
     } else {
-        return false;
+      return false;
     }
-  })
+  });
 }